2024-12-03 23:26:02,623 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5c7933ad 2024-12-03 23:26:02,638 main DEBUG Took 0.013375 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-03 23:26:02,639 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-03 23:26:02,639 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-03 23:26:02,640 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-03 23:26:02,642 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-03 23:26:02,653 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-03 23:26:02,676 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-03 23:26:02,678 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-03 23:26:02,679 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-03 23:26:02,679 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-03 23:26:02,680 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-03 23:26:02,680 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-03 23:26:02,683 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-03 23:26:02,683 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-03 23:26:02,684 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-03 23:26:02,684 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-03 23:26:02,686 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-03 23:26:02,687 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-03 23:26:02,688 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-03 23:26:02,688 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-03 23:26:02,689 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-03 23:26:02,689 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-03 23:26:02,690 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-03 23:26:02,690 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-03 23:26:02,691 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-03 23:26:02,691 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-03 23:26:02,692 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-03 23:26:02,692 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-03 23:26:02,693 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-03 23:26:02,694 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-03 23:26:02,695 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-03 23:26:02,696 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-03 23:26:02,699 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-03 23:26:02,704 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-03 23:26:02,709 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-03 23:26:02,710 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-12-03 23:26:02,712 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-03 23:26:02,712 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-03 23:26:02,730 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-03 23:26:02,734 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-03 23:26:02,737 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-03 23:26:02,738 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-03 23:26:02,738 main DEBUG createAppenders(={Console}) 2024-12-03 23:26:02,739 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5c7933ad initialized 2024-12-03 23:26:02,740 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5c7933ad 2024-12-03 23:26:02,740 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5c7933ad OK. 2024-12-03 23:26:02,741 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-03 23:26:02,741 main DEBUG OutputStream closed 2024-12-03 23:26:02,741 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-03 23:26:02,742 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-03 23:26:02,742 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@7c711375 OK 2024-12-03 23:26:02,890 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-03 23:26:02,893 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-03 23:26:02,895 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-03 23:26:02,896 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-03 23:26:02,897 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-03 23:26:02,897 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-03 23:26:02,898 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-03 23:26:02,900 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-03 23:26:02,900 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-03 23:26:02,905 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-03 23:26:02,905 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-03 23:26:02,906 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-03 23:26:02,906 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-03 23:26:02,906 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-03 23:26:02,907 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-03 23:26:02,907 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-03 23:26:02,907 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-03 23:26:02,908 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-03 23:26:02,912 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-03 23:26:02,913 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@554e218) with optional ClassLoader: null 2024-12-03 23:26:02,913 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-03 23:26:02,914 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@554e218] started OK. 2024-12-03T23:26:02,934 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestExportSnapshot timeout: 13 mins 2024-12-03 23:26:02,939 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-03 23:26:02,939 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-03T23:26:03,385 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458 2024-12-03T23:26:03,386 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestMobExportSnapshot timeout: 13 mins 2024-12-03T23:26:03,388 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestMobSecureExportSnapshot timeout: 13 mins 2024-12-03T23:26:03,524 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... 
using builtin-java classes where applicable 2024-12-03T23:26:03,799 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-03T23:26:03,837 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb, deleteOnExit=true 2024-12-03T23:26:03,838 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-03T23:26:03,840 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/test.cache.data in system properties and HBase conf 2024-12-03T23:26:03,842 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/hadoop.tmp.dir in system properties and HBase conf 2024-12-03T23:26:03,842 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/hadoop.log.dir in system properties and HBase conf 2024-12-03T23:26:03,843 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-03T23:26:03,852 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-03T23:26:03,852 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-03T23:26:03,940 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-03T23:26:03,959 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-03T23:26:03,960 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-03T23:26:03,960 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-03T23:26:03,961 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-03T23:26:03,962 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-03T23:26:03,962 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-03T23:26:03,963 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-03T23:26:03,963 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-03T23:26:03,964 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-03T23:26:03,964 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/nfs.dump.dir in system properties and HBase conf 2024-12-03T23:26:03,965 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/java.io.tmpdir in system properties and HBase conf 2024-12-03T23:26:03,965 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-03T23:26:03,966 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-03T23:26:03,967 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-03T23:26:05,206 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-03T23:26:05,295 INFO [Time-limited test {}] log.Log(170): Logging initialized @3577ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-03T23:26:05,382 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T23:26:05,469 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T23:26:05,509 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T23:26:05,510 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T23:26:05,512 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-03T23:26:05,538 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T23:26:05,542 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@72770802{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/hadoop.log.dir/,AVAILABLE} 2024-12-03T23:26:05,544 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@15a5d53b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T23:26:05,769 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@150ffd7b{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/java.io.tmpdir/jetty-localhost-42843-hadoop-hdfs-3_4_1-tests_jar-_-any-14397252056582867315/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-03T23:26:05,796 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@322d032a{HTTP/1.1, (http/1.1)}{localhost:42843} 2024-12-03T23:26:05,796 INFO [Time-limited test {}] server.Server(415): Started @4079ms 2024-12-03T23:26:06,316 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T23:26:06,324 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T23:26:06,325 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T23:26:06,325 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T23:26:06,326 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-03T23:26:06,327 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6b6c5d7e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/hadoop.log.dir/,AVAILABLE} 2024-12-03T23:26:06,328 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@66b0eb98{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T23:26:06,433 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@b343855{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/java.io.tmpdir/jetty-localhost-35253-hadoop-hdfs-3_4_1-tests_jar-_-any-15082305239137414666/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T23:26:06,434 INFO [Time-limited 
test {}] server.AbstractConnector(333): Started ServerConnector@33a58c38{HTTP/1.1, (http/1.1)}{localhost:35253} 2024-12-03T23:26:06,434 INFO [Time-limited test {}] server.Server(415): Started @4717ms 2024-12-03T23:26:06,504 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-03T23:26:06,669 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T23:26:06,677 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T23:26:06,692 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T23:26:06,692 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T23:26:06,693 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-03T23:26:06,694 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@8c924b4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/hadoop.log.dir/,AVAILABLE} 2024-12-03T23:26:06,695 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3fcad17b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T23:26:06,834 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3f639787{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/java.io.tmpdir/jetty-localhost-44045-hadoop-hdfs-3_4_1-tests_jar-_-any-1532983588714020593/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T23:26:06,835 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@439d9c33{HTTP/1.1, (http/1.1)}{localhost:44045} 2024-12-03T23:26:06,835 INFO [Time-limited test {}] server.Server(415): Started @5118ms 2024-12-03T23:26:06,839 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-03T23:26:06,894 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T23:26:06,900 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T23:26:06,902 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T23:26:06,902 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T23:26:06,902 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-03T23:26:06,903 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@49217e61{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/hadoop.log.dir/,AVAILABLE} 2024-12-03T23:26:06,904 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5fa1fc7f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T23:26:07,038 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@41fe4483{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/java.io.tmpdir/jetty-localhost-37007-hadoop-hdfs-3_4_1-tests_jar-_-any-12616249891087870682/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T23:26:07,039 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6d761894{HTTP/1.1, (http/1.1)}{localhost:37007} 2024-12-03T23:26:07,039 INFO [Time-limited test {}] server.Server(415): Started @5322ms 2024-12-03T23:26:07,042 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-03T23:26:08,096 WARN [Thread-123 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data2/current/BP-1874256318-172.17.0.2-1733268364664/current, will proceed with Du for space computation calculation, 2024-12-03T23:26:08,096 WARN [Thread-124 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data3/current/BP-1874256318-172.17.0.2-1733268364664/current, will proceed with Du for space computation calculation, 2024-12-03T23:26:08,096 WARN [Thread-122 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data1/current/BP-1874256318-172.17.0.2-1733268364664/current, will proceed with Du for space computation calculation, 2024-12-03T23:26:08,096 WARN [Thread-125 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data4/current/BP-1874256318-172.17.0.2-1733268364664/current, will proceed with Du for space computation calculation, 2024-12-03T23:26:08,140 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-03T23:26:08,142 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-03T23:26:08,188 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd8d6a4097c98cfc7 with lease ID 0x44156536eb9d583a: Processing first storage report for DS-b561270c-8419-428c-8d66-8090ccbab66f from datanode DatanodeRegistration(127.0.0.1:39281, datanodeUuid=ae93b843-ed0e-4a0a-9acd-7597bf4b3d80, infoPort=42495, infoSecurePort=0, ipcPort=44719, storageInfo=lv=-57;cid=testClusterID;nsid=1998910720;c=1733268364664) 2024-12-03T23:26:08,190 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd8d6a4097c98cfc7 with lease ID 0x44156536eb9d583a: from storage DS-b561270c-8419-428c-8d66-8090ccbab66f node DatanodeRegistration(127.0.0.1:39281, datanodeUuid=ae93b843-ed0e-4a0a-9acd-7597bf4b3d80, infoPort=42495, infoSecurePort=0, ipcPort=44719, storageInfo=lv=-57;cid=testClusterID;nsid=1998910720;c=1733268364664), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-12-03T23:26:08,190 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa8e951e610e30219 with lease ID 0x44156536eb9d583b: Processing first storage report for DS-7ecaa6af-295d-44fe-bbc0-1dd5c185add7 from datanode DatanodeRegistration(127.0.0.1:37061, datanodeUuid=e208b783-b4f1-487a-9462-89cb6391e1cd, infoPort=34137, infoSecurePort=0, ipcPort=45585, storageInfo=lv=-57;cid=testClusterID;nsid=1998910720;c=1733268364664) 2024-12-03T23:26:08,191 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa8e951e610e30219 with lease ID 0x44156536eb9d583b: from storage DS-7ecaa6af-295d-44fe-bbc0-1dd5c185add7 node DatanodeRegistration(127.0.0.1:37061, datanodeUuid=e208b783-b4f1-487a-9462-89cb6391e1cd, infoPort=34137, infoSecurePort=0, ipcPort=45585, storageInfo=lv=-57;cid=testClusterID;nsid=1998910720;c=1733268364664), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-03T23:26:08,191 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd8d6a4097c98cfc7 with lease ID 0x44156536eb9d583a: Processing first storage report for DS-0b41b86d-a2bb-425e-9bb4-4e67d52e9fec from datanode DatanodeRegistration(127.0.0.1:39281, datanodeUuid=ae93b843-ed0e-4a0a-9acd-7597bf4b3d80, infoPort=42495, infoSecurePort=0, ipcPort=44719, storageInfo=lv=-57;cid=testClusterID;nsid=1998910720;c=1733268364664) 2024-12-03T23:26:08,191 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd8d6a4097c98cfc7 with lease ID 0x44156536eb9d583a: from storage DS-0b41b86d-a2bb-425e-9bb4-4e67d52e9fec node DatanodeRegistration(127.0.0.1:39281, datanodeUuid=ae93b843-ed0e-4a0a-9acd-7597bf4b3d80, infoPort=42495, infoSecurePort=0, ipcPort=44719, storageInfo=lv=-57;cid=testClusterID;nsid=1998910720;c=1733268364664), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T23:26:08,192 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa8e951e610e30219 with lease ID 0x44156536eb9d583b: Processing first storage report for DS-c0bdee06-d6a9-4a9a-b01d-7c488c80c5d2 from datanode DatanodeRegistration(127.0.0.1:37061, datanodeUuid=e208b783-b4f1-487a-9462-89cb6391e1cd, infoPort=34137, infoSecurePort=0, ipcPort=45585, storageInfo=lv=-57;cid=testClusterID;nsid=1998910720;c=1733268364664) 2024-12-03T23:26:08,192 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0xa8e951e610e30219 with lease ID 0x44156536eb9d583b: from storage DS-c0bdee06-d6a9-4a9a-b01d-7c488c80c5d2 node DatanodeRegistration(127.0.0.1:37061, datanodeUuid=e208b783-b4f1-487a-9462-89cb6391e1cd, infoPort=34137, infoSecurePort=0, ipcPort=45585, storageInfo=lv=-57;cid=testClusterID;nsid=1998910720;c=1733268364664), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T23:26:08,327 WARN [Thread-142 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data5/current/BP-1874256318-172.17.0.2-1733268364664/current, will proceed with Du for space computation calculation, 2024-12-03T23:26:08,327 WARN [Thread-143 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data6/current/BP-1874256318-172.17.0.2-1733268364664/current, will proceed with Du for space computation calculation, 2024-12-03T23:26:08,361 WARN [Thread-103 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-03T23:26:08,366 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbfc68094f9d48cc2 with lease ID 0x44156536eb9d583c: Processing first storage report for DS-d6837643-beec-4f47-b7c3-1e2856818a07 from datanode DatanodeRegistration(127.0.0.1:45743, datanodeUuid=279ee435-d4d4-4708-b3bc-a2799a7600c7, infoPort=39523, infoSecurePort=0, ipcPort=40667, storageInfo=lv=-57;cid=testClusterID;nsid=1998910720;c=1733268364664) 2024-12-03T23:26:08,366 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbfc68094f9d48cc2 with lease ID 0x44156536eb9d583c: from storage DS-d6837643-beec-4f47-b7c3-1e2856818a07 node DatanodeRegistration(127.0.0.1:45743, datanodeUuid=279ee435-d4d4-4708-b3bc-a2799a7600c7, infoPort=39523, infoSecurePort=0, ipcPort=40667, storageInfo=lv=-57;cid=testClusterID;nsid=1998910720;c=1733268364664), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T23:26:08,367 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbfc68094f9d48cc2 with lease ID 0x44156536eb9d583c: Processing first storage report for DS-8d38bc58-9aeb-4dfa-99b4-5b56914fd47a from datanode DatanodeRegistration(127.0.0.1:45743, datanodeUuid=279ee435-d4d4-4708-b3bc-a2799a7600c7, infoPort=39523, infoSecurePort=0, ipcPort=40667, storageInfo=lv=-57;cid=testClusterID;nsid=1998910720;c=1733268364664) 2024-12-03T23:26:08,367 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbfc68094f9d48cc2 with lease ID 0x44156536eb9d583c: from storage DS-8d38bc58-9aeb-4dfa-99b4-5b56914fd47a node DatanodeRegistration(127.0.0.1:45743, datanodeUuid=279ee435-d4d4-4708-b3bc-a2799a7600c7, infoPort=39523, infoSecurePort=0, ipcPort=40667, storageInfo=lv=-57;cid=testClusterID;nsid=1998910720;c=1733268364664), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T23:26:08,449 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458 2024-12-03T23:26:08,551 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/zookeeper_0, clientPort=54632, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-03T23:26:08,565 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=54632 2024-12-03T23:26:08,587 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T23:26:08,593 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T23:26:08,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741825_1001 (size=7) 2024-12-03T23:26:08,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741825_1001 (size=7) 2024-12-03T23:26:08,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741825_1001 (size=7) 2024-12-03T23:26:09,322 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b with version=8 2024-12-03T23:26:09,323 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/hbase-staging 2024-12-03T23:26:09,406 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-03T23:26:09,667 INFO [Time-limited test {}] client.ConnectionUtils(128): master/ed8014902436:0 server-side Connection retries=45 2024-12-03T23:26:09,677 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T23:26:09,678 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-03T23:26:09,684 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-03T23:26:09,685 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated 
replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T23:26:09,685 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-03T23:26:09,859 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-03T23:26:09,924 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-03T23:26:09,936 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-03T23:26:09,942 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-03T23:26:09,979 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 121791 (auto-detected) 2024-12-03T23:26:09,980 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-03T23:26:10,010 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43753 2024-12-03T23:26:10,043 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:43753 connecting to ZooKeeper ensemble=127.0.0.1:54632 2024-12-03T23:26:10,194 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:437530x0, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-03T23:26:10,199 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:43753-0x1019ecdff490000 connected 2024-12-03T23:26:10,279 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T23:26:10,283 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T23:26:10,296 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T23:26:10,301 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b, hbase.cluster.distributed=false 2024-12-03T23:26:10,338 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-03T23:26:10,342 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43753 2024-12-03T23:26:10,343 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43753 2024-12-03T23:26:10,343 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, 
numCallQueues=1, port=43753 2024-12-03T23:26:10,345 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43753 2024-12-03T23:26:10,346 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43753 2024-12-03T23:26:10,438 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/ed8014902436:0 server-side Connection retries=45 2024-12-03T23:26:10,440 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T23:26:10,440 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-03T23:26:10,440 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-03T23:26:10,440 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T23:26:10,441 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-03T23:26:10,443 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-03T23:26:10,446 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-03T23:26:10,447 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39085 2024-12-03T23:26:10,449 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39085 connecting to ZooKeeper ensemble=127.0.0.1:54632 2024-12-03T23:26:10,452 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T23:26:10,457 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T23:26:10,469 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:390850x0, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-03T23:26:10,471 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:390850x0, quorum=127.0.0.1:54632, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T23:26:10,471 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39085-0x1019ecdff490001 connected 2024-12-03T23:26:10,474 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-03T23:26:10,483 INFO [Time-limited test {}] mob.MobFileCache(128): MobFileCache disabled 2024-12-03T23:26:10,486 DEBUG [Time-limited test 
{}] zookeeper.ZKUtil(113): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-03T23:26:10,495 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-03T23:26:10,497 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39085 2024-12-03T23:26:10,498 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39085 2024-12-03T23:26:10,499 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39085 2024-12-03T23:26:10,500 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39085 2024-12-03T23:26:10,502 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39085 2024-12-03T23:26:10,526 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/ed8014902436:0 server-side Connection retries=45 2024-12-03T23:26:10,526 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T23:26:10,527 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-03T23:26:10,527 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-03T23:26:10,528 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T23:26:10,528 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-03T23:26:10,528 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-03T23:26:10,529 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-03T23:26:10,530 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41121 2024-12-03T23:26:10,532 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41121 connecting to ZooKeeper ensemble=127.0.0.1:54632 2024-12-03T23:26:10,534 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T23:26:10,538 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T23:26:10,561 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:411210x0, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-03T23:26:10,562 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:411210x0, quorum=127.0.0.1:54632, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T23:26:10,563 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-03T23:26:10,563 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41121-0x1019ecdff490002 connected 2024-12-03T23:26:10,564 INFO [Time-limited test {}] mob.MobFileCache(128): MobFileCache disabled 2024-12-03T23:26:10,568 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-03T23:26:10,570 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-03T23:26:10,571 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41121 2024-12-03T23:26:10,575 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41121 2024-12-03T23:26:10,576 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41121 2024-12-03T23:26:10,577 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41121 2024-12-03T23:26:10,577 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41121 2024-12-03T23:26:10,601 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/ed8014902436:0 server-side Connection retries=45 2024-12-03T23:26:10,601 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T23:26:10,601 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-03T23:26:10,602 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-03T23:26:10,602 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T23:26:10,602 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-03T23:26:10,602 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, 
hbase.pb.BootstrapNodeService 2024-12-03T23:26:10,602 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-03T23:26:10,604 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37351 2024-12-03T23:26:10,607 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37351 connecting to ZooKeeper ensemble=127.0.0.1:54632 2024-12-03T23:26:10,608 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T23:26:10,612 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T23:26:10,652 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:373510x0, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-03T23:26:10,653 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:373510x0, quorum=127.0.0.1:54632, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T23:26:10,653 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37351-0x1019ecdff490003 connected 2024-12-03T23:26:10,653 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-03T23:26:10,654 INFO [Time-limited test {}] mob.MobFileCache(128): MobFileCache disabled 2024-12-03T23:26:10,655 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-03T23:26:10,658 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-03T23:26:10,667 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37351 2024-12-03T23:26:10,667 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37351 2024-12-03T23:26:10,670 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37351 2024-12-03T23:26:10,674 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37351 2024-12-03T23:26:10,675 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37351 2024-12-03T23:26:10,692 DEBUG [M:0;ed8014902436:43753 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;ed8014902436:43753 2024-12-03T23:26:10,695 INFO [master/ed8014902436:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/ed8014902436,43753,1733268369485 2024-12-03T23:26:10,710 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, 
path=/hbase/backup-masters 2024-12-03T23:26:10,710 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T23:26:10,710 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T23:26:10,710 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T23:26:10,714 DEBUG [master/ed8014902436:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/ed8014902436,43753,1733268369485 2024-12-03T23:26:10,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-03T23:26:10,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-03T23:26:10,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-03T23:26:10,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T23:26:10,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T23:26:10,745 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T23:26:10,745 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T23:26:10,746 DEBUG [master/ed8014902436:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-03T23:26:10,748 INFO [master/ed8014902436:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/ed8014902436,43753,1733268369485 from backup master directory 2024-12-03T23:26:10,761 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, 
path=/hbase/backup-masters 2024-12-03T23:26:10,761 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T23:26:10,761 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/ed8014902436,43753,1733268369485 2024-12-03T23:26:10,761 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T23:26:10,761 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T23:26:10,762 WARN [master/ed8014902436:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-03T23:26:10,762 INFO [master/ed8014902436:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=ed8014902436,43753,1733268369485 2024-12-03T23:26:10,764 INFO [master/ed8014902436:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-03T23:26:10,766 INFO [master/ed8014902436:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-03T23:26:10,837 DEBUG [master/ed8014902436:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/hbase.id] with ID: 365bf15c-9cbe-4838-a767-ac7274dcdbe4 2024-12-03T23:26:10,837 DEBUG [master/ed8014902436:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.tmp/hbase.id 2024-12-03T23:26:10,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741826_1002 (size=42) 2024-12-03T23:26:10,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741826_1002 (size=42) 2024-12-03T23:26:10,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741826_1002 (size=42) 2024-12-03T23:26:10,862 DEBUG [master/ed8014902436:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.tmp/hbase.id]:[hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/hbase.id] 2024-12-03T23:26:10,917 INFO [master/ed8014902436:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T23:26:10,923 INFO 
[master/ed8014902436:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-03T23:26:10,942 INFO [master/ed8014902436:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 16ms. 2024-12-03T23:26:11,060 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T23:26:11,060 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T23:26:11,060 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T23:26:11,060 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T23:26:11,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741827_1003 (size=196) 2024-12-03T23:26:11,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741827_1003 (size=196) 2024-12-03T23:26:11,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741827_1003 (size=196) 2024-12-03T23:26:11,104 INFO [master/ed8014902436:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-03T23:26:11,107 INFO [master/ed8014902436:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-03T23:26:11,122 DEBUG 
[master/ed8014902436:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.lang.Class.forName0(Native Method) ~[?:?] at java.lang.Class.forName(Class.java:375) ~[?:?] at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:150) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:174) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:262) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:231) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:400) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:1003) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2535) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:613) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.lambda$tracedRunnable$2(TraceUtil.java:155) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T23:26:11,127 INFO [master/ed8014902436:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-03T23:26:11,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741828_1004 (size=1189) 2024-12-03T23:26:11,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741828_1004 (size=1189) 2024-12-03T23:26:11,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741828_1004 (size=1189) 2024-12-03T23:26:11,183 INFO [master/ed8014902436:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/MasterData/data/master/store 2024-12-03T23:26:11,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741829_1005 (size=34) 2024-12-03T23:26:11,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741829_1005 (size=34) 2024-12-03T23:26:11,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741829_1005 (size=34) 2024-12-03T23:26:11,208 INFO [master/ed8014902436:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 
2024-12-03T23:26:11,211 DEBUG [master/ed8014902436:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T23:26:11,212 DEBUG [master/ed8014902436:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-03T23:26:11,212 INFO [master/ed8014902436:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T23:26:11,212 DEBUG [master/ed8014902436:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T23:26:11,214 DEBUG [master/ed8014902436:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-03T23:26:11,214 DEBUG [master/ed8014902436:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T23:26:11,214 INFO [master/ed8014902436:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T23:26:11,215 DEBUG [master/ed8014902436:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733268371212Disabling compacts and flushes for region at 1733268371212Disabling writes for close at 1733268371214 (+2 ms)Writing region close event to WAL at 1733268371214Closed at 1733268371214 2024-12-03T23:26:11,217 WARN [master/ed8014902436:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/MasterData/data/master/store/.initializing 2024-12-03T23:26:11,217 DEBUG [master/ed8014902436:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/MasterData/WALs/ed8014902436,43753,1733268369485 2024-12-03T23:26:11,226 INFO [master/ed8014902436:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-03T23:26:11,245 INFO [master/ed8014902436:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ed8014902436%2C43753%2C1733268369485, suffix=, logDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/MasterData/WALs/ed8014902436,43753,1733268369485, archiveDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/MasterData/oldWALs, maxLogs=10 2024-12-03T23:26:11,276 DEBUG [master/ed8014902436:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/MasterData/WALs/ed8014902436,43753,1733268369485/ed8014902436%2C43753%2C1733268369485.1733268371251, exclude list is [], retry=0 2024-12-03T23:26:11,295 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45743,DS-d6837643-beec-4f47-b7c3-1e2856818a07,DISK] 
2024-12-03T23:26:11,295 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39281,DS-b561270c-8419-428c-8d66-8090ccbab66f,DISK] 2024-12-03T23:26:11,295 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37061,DS-7ecaa6af-295d-44fe-bbc0-1dd5c185add7,DISK] 2024-12-03T23:26:11,298 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-12-03T23:26:11,338 INFO [master/ed8014902436:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/MasterData/WALs/ed8014902436,43753,1733268369485/ed8014902436%2C43753%2C1733268369485.1733268371251 2024-12-03T23:26:11,340 DEBUG [master/ed8014902436:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42495:42495),(127.0.0.1/127.0.0.1:34137:34137),(127.0.0.1/127.0.0.1:39523:39523)] 2024-12-03T23:26:11,341 DEBUG [master/ed8014902436:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-03T23:26:11,341 DEBUG [master/ed8014902436:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T23:26:11,346 DEBUG [master/ed8014902436:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T23:26:11,348 DEBUG [master/ed8014902436:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T23:26:11,387 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T23:26:11,417 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-03T23:26:11,422 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:26:11,425 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T23:26:11,425 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T23:26:11,429 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-03T23:26:11,429 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:26:11,430 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T23:26:11,431 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T23:26:11,433 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-03T23:26:11,434 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:26:11,435 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 
{}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T23:26:11,435 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T23:26:11,439 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-03T23:26:11,439 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:26:11,440 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T23:26:11,441 DEBUG [master/ed8014902436:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T23:26:11,444 DEBUG [master/ed8014902436:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-03T23:26:11,446 DEBUG [master/ed8014902436:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-03T23:26:11,452 DEBUG [master/ed8014902436:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T23:26:11,452 DEBUG [master/ed8014902436:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T23:26:11,456 DEBUG [master/ed8014902436:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-12-03T23:26:11,460 DEBUG [master/ed8014902436:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T23:26:11,466 DEBUG [master/ed8014902436:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T23:26:11,467 INFO [master/ed8014902436:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64215613, jitterRate=-0.043112799525260925}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-03T23:26:11,474 DEBUG [master/ed8014902436:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733268371363Initializing all the Stores at 1733268371365 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733268371366 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733268371367 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733268371367Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733268371367Cleaning up temporary data from old regions at 1733268371452 (+85 ms)Region opened successfully at 1733268371474 (+22 ms) 2024-12-03T23:26:11,476 INFO [master/ed8014902436:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-03T23:26:11,511 DEBUG [master/ed8014902436:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@35fb30bd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=ed8014902436/172.17.0.2:0 2024-12-03T23:26:11,542 INFO [master/ed8014902436:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 
2024-12-03T23:26:11,553 INFO [master/ed8014902436:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-03T23:26:11,553 INFO [master/ed8014902436:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-03T23:26:11,556 INFO [master/ed8014902436:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-03T23:26:11,557 INFO [master/ed8014902436:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-03T23:26:11,562 INFO [master/ed8014902436:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-12-03T23:26:11,563 INFO [master/ed8014902436:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-03T23:26:11,593 INFO [master/ed8014902436:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-03T23:26:11,605 DEBUG [master/ed8014902436:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-03T23:26:11,669 DEBUG [master/ed8014902436:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-03T23:26:11,671 INFO [master/ed8014902436:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-03T23:26:11,673 DEBUG [master/ed8014902436:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-03T23:26:11,677 DEBUG [master/ed8014902436:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-03T23:26:11,679 INFO [master/ed8014902436:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-03T23:26:11,683 DEBUG [master/ed8014902436:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-03T23:26:11,694 DEBUG [master/ed8014902436:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-03T23:26:11,695 DEBUG [master/ed8014902436:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-03T23:26:11,702 DEBUG [master/ed8014902436:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-03T23:26:11,718 DEBUG [master/ed8014902436:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-03T23:26:11,727 DEBUG [master/ed8014902436:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-03T23:26:11,735 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-03T23:26:11,735 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-03T23:26:11,735 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-03T23:26:11,735 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-03T23:26:11,735 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T23:26:11,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T23:26:11,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T23:26:11,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T23:26:11,738 INFO [master/ed8014902436:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=ed8014902436,43753,1733268369485, sessionid=0x1019ecdff490000, setting cluster-up flag (Was=false) 2024-12-03T23:26:11,760 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T23:26:11,760 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T23:26:11,760 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T23:26:11,761 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
2024-12-03T23:26:11,785 DEBUG [master/ed8014902436:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-03T23:26:11,787 DEBUG [master/ed8014902436:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=ed8014902436,43753,1733268369485 2024-12-03T23:26:11,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T23:26:11,811 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T23:26:11,811 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T23:26:11,811 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T23:26:11,835 DEBUG [master/ed8014902436:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-03T23:26:11,837 DEBUG [master/ed8014902436:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=ed8014902436,43753,1733268369485 2024-12-03T23:26:11,843 INFO [master/ed8014902436:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-03T23:26:11,876 DEBUG [master/ed8014902436:0:becomeActiveMaster {}] master.HMaster(3441): Registered master coprocessor service: service=AccessControlService 2024-12-03T23:26:11,880 INFO [master/ed8014902436:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-03T23:26:11,881 INFO [master/ed8014902436:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver loaded, priority=536870912. 
2024-12-03T23:26:11,882 INFO [RS:2;ed8014902436:37351 {}] regionserver.HRegionServer(746): ClusterId : 365bf15c-9cbe-4838-a767-ac7274dcdbe4 2024-12-03T23:26:11,882 INFO [RS:1;ed8014902436:41121 {}] regionserver.HRegionServer(746): ClusterId : 365bf15c-9cbe-4838-a767-ac7274dcdbe4 2024-12-03T23:26:11,885 DEBUG [RS:2;ed8014902436:37351 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-03T23:26:11,886 DEBUG [RS:1;ed8014902436:41121 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-03T23:26:11,887 INFO [RS:0;ed8014902436:39085 {}] regionserver.HRegionServer(746): ClusterId : 365bf15c-9cbe-4838-a767-ac7274dcdbe4 2024-12-03T23:26:11,887 DEBUG [RS:0;ed8014902436:39085 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-03T23:26:11,921 DEBUG [RS:1;ed8014902436:41121 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-03T23:26:11,921 DEBUG [RS:2;ed8014902436:37351 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-03T23:26:11,921 DEBUG [RS:0;ed8014902436:39085 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-03T23:26:11,922 DEBUG [RS:2;ed8014902436:37351 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-03T23:26:11,922 DEBUG [RS:1;ed8014902436:41121 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-03T23:26:11,922 DEBUG [RS:0;ed8014902436:39085 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-03T23:26:11,935 DEBUG [master/ed8014902436:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-03T23:26:11,936 DEBUG [RS:2;ed8014902436:37351 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-03T23:26:11,936 DEBUG [RS:0;ed8014902436:39085 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-03T23:26:11,937 DEBUG [RS:1;ed8014902436:41121 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-03T23:26:11,937 DEBUG [RS:1;ed8014902436:41121 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@20a14f57, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=ed8014902436/172.17.0.2:0 2024-12-03T23:26:11,937 DEBUG [RS:0;ed8014902436:39085 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7d088fb0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=ed8014902436/172.17.0.2:0 2024-12-03T23:26:11,938 DEBUG [RS:2;ed8014902436:37351 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@77cb1756, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=ed8014902436/172.17.0.2:0 2024-12-03T23:26:11,947 INFO [master/ed8014902436:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): 
slop=0.2 2024-12-03T23:26:11,955 DEBUG [RS:0;ed8014902436:39085 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;ed8014902436:39085 2024-12-03T23:26:11,957 INFO [master/ed8014902436:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-03T23:26:11,958 DEBUG [RS:1;ed8014902436:41121 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;ed8014902436:41121 2024-12-03T23:26:11,958 DEBUG [RS:2;ed8014902436:37351 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;ed8014902436:37351 2024-12-03T23:26:11,959 INFO [RS:1;ed8014902436:41121 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-03T23:26:11,959 INFO [RS:2;ed8014902436:37351 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-03T23:26:11,959 INFO [RS:0;ed8014902436:39085 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-03T23:26:11,959 INFO [RS:2;ed8014902436:37351 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-03T23:26:11,959 INFO [RS:1;ed8014902436:41121 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-03T23:26:11,959 INFO [RS:0;ed8014902436:39085 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-03T23:26:11,959 DEBUG [RS:1;ed8014902436:41121 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-12-03T23:26:11,959 DEBUG [RS:2;ed8014902436:37351 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-12-03T23:26:11,959 DEBUG [RS:0;ed8014902436:39085 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-12-03T23:26:11,960 INFO [RS:1;ed8014902436:41121 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-03T23:26:11,960 INFO [RS:2;ed8014902436:37351 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-03T23:26:11,960 DEBUG [RS:1;ed8014902436:41121 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-03T23:26:11,960 DEBUG [RS:2;ed8014902436:37351 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-03T23:26:11,960 INFO [RS:0;ed8014902436:39085 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-03T23:26:11,960 DEBUG [RS:0;ed8014902436:39085 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-03T23:26:11,963 INFO [RS:0;ed8014902436:39085 {}] regionserver.HRegionServer(2659): reportForDuty to master=ed8014902436,43753,1733268369485 with port=39085, startcode=1733268370401 2024-12-03T23:26:11,963 INFO [RS:2;ed8014902436:37351 {}] regionserver.HRegionServer(2659): reportForDuty to master=ed8014902436,43753,1733268369485 with port=37351, startcode=1733268370600 2024-12-03T23:26:11,963 INFO [RS:1;ed8014902436:41121 {}] regionserver.HRegionServer(2659): reportForDuty to master=ed8014902436,43753,1733268369485 with port=41121, startcode=1733268370525 2024-12-03T23:26:11,964 DEBUG [master/ed8014902436:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: ed8014902436,43753,1733268369485 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-03T23:26:11,974 DEBUG [master/ed8014902436:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/ed8014902436:0, corePoolSize=5, maxPoolSize=5 2024-12-03T23:26:11,975 DEBUG [master/ed8014902436:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/ed8014902436:0, corePoolSize=5, maxPoolSize=5 2024-12-03T23:26:11,975 DEBUG [master/ed8014902436:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/ed8014902436:0, corePoolSize=5, maxPoolSize=5 2024-12-03T23:26:11,975 DEBUG [master/ed8014902436:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/ed8014902436:0, corePoolSize=5, maxPoolSize=5 2024-12-03T23:26:11,975 DEBUG [master/ed8014902436:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/ed8014902436:0, corePoolSize=10, maxPoolSize=10 2024-12-03T23:26:11,976 DEBUG [master/ed8014902436:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/ed8014902436:0, corePoolSize=1, maxPoolSize=1 2024-12-03T23:26:11,976 DEBUG [master/ed8014902436:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/ed8014902436:0, corePoolSize=2, maxPoolSize=2 2024-12-03T23:26:11,976 DEBUG [master/ed8014902436:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/ed8014902436:0, corePoolSize=1, maxPoolSize=1 2024-12-03T23:26:11,977 DEBUG [RS:0;ed8014902436:39085 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-03T23:26:11,977 DEBUG [RS:1;ed8014902436:41121 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-03T23:26:11,977 DEBUG [RS:2;ed8014902436:37351 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-03T23:26:11,989 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T23:26:11,990 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-03T23:26:11,996 DEBUG 
[PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:26:11,996 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-03T23:26:12,031 INFO [master/ed8014902436:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733268402031 2024-12-03T23:26:12,033 INFO [master/ed8014902436:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-03T23:26:12,035 INFO [master/ed8014902436:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-03T23:26:12,042 INFO [master/ed8014902436:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-03T23:26:12,044 INFO [master/ed8014902436:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-03T23:26:12,045 INFO [master/ed8014902436:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-03T23:26:12,045 INFO [master/ed8014902436:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-03T23:26:12,059 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42615, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-03T23:26:12,059 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52845, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), 
service=RegionServerStatusService 2024-12-03T23:26:12,049 INFO [master/ed8014902436:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-03T23:26:12,060 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52999, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-03T23:26:12,068 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43753 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-03T23:26:12,070 INFO [master/ed8014902436:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-03T23:26:12,072 INFO [master/ed8014902436:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-03T23:26:12,073 INFO [master/ed8014902436:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-03T23:26:12,075 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43753 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-03T23:26:12,076 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43753 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-03T23:26:12,079 INFO [master/ed8014902436:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-03T23:26:12,079 INFO [master/ed8014902436:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-03T23:26:12,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741831_1007 (size=1321) 2024-12-03T23:26:12,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741831_1007 (size=1321) 2024-12-03T23:26:12,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741831_1007 (size=1321) 2024-12-03T23:26:12,088 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-03T23:26:12,088 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE 
=> 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b 2024-12-03T23:26:12,099 DEBUG [master/ed8014902436:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/ed8014902436:0:becomeActiveMaster-HFileCleaner.large.0-1733268372081,5,FailOnTimeoutGroup] 2024-12-03T23:26:12,102 DEBUG [master/ed8014902436:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/ed8014902436:0:becomeActiveMaster-HFileCleaner.small.0-1733268372099,5,FailOnTimeoutGroup] 2024-12-03T23:26:12,103 INFO [master/ed8014902436:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-03T23:26:12,103 DEBUG [RS:2;ed8014902436:37351 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-12-03T23:26:12,103 DEBUG [RS:1;ed8014902436:41121 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-12-03T23:26:12,103 DEBUG [RS:0;ed8014902436:39085 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-12-03T23:26:12,103 INFO [master/ed8014902436:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-03T23:26:12,103 WARN [RS:0;ed8014902436:39085 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-03T23:26:12,103 WARN [RS:2;ed8014902436:37351 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-03T23:26:12,103 WARN [RS:1;ed8014902436:41121 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-03T23:26:12,104 INFO [master/ed8014902436:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-03T23:26:12,104 INFO [master/ed8014902436:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
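The "reportForDuty failed; sleeping 100 ms and then retrying" entries above reflect a simple retry-with-fixed-sleep loop: each region server keeps calling back to the master until the call stops failing with ServerNotRunningYetException. A minimal plain-Java sketch of that pattern follows; reportToMaster() is a hypothetical stand-in for the real RPC, and the 100 ms delay is the value seen in the log.

    import java.util.concurrent.ThreadLocalRandom;

    // Sketch of the retry-with-sleep pattern behind the
    // "reportForDuty failed; sleeping 100 ms and then retrying" entries.
    // reportToMaster() is a hypothetical stand-in for the real RPC call.
    public class ReportForDutyRetry {
        static boolean reportToMaster() {
            // Pretend the master only becomes ready after a few attempts.
            return ThreadLocalRandom.current().nextInt(10) == 0;
        }

        public static void main(String[] args) throws InterruptedException {
            long sleepMs = 100; // the log shows a 100 ms back-off between attempts
            while (!reportToMaster()) {
                Thread.sleep(sleepMs);
            }
            System.out.println("registered with master");
        }
    }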
2024-12-03T23:26:12,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741832_1008 (size=32) 2024-12-03T23:26:12,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741832_1008 (size=32) 2024-12-03T23:26:12,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741832_1008 (size=32) 2024-12-03T23:26:12,124 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T23:26:12,127 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-03T23:26:12,131 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-03T23:26:12,131 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:26:12,132 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T23:26:12,133 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-03T23:26:12,137 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-03T23:26:12,137 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:26:12,138 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T23:26:12,139 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-03T23:26:12,143 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-03T23:26:12,143 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:26:12,144 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T23:26:12,144 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-03T23:26:12,148 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-03T23:26:12,148 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:26:12,149 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T23:26:12,150 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 
1588230740 2024-12-03T23:26:12,151 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/hbase/meta/1588230740 2024-12-03T23:26:12,152 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/hbase/meta/1588230740 2024-12-03T23:26:12,156 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-03T23:26:12,157 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-03T23:26:12,158 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-03T23:26:12,161 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-03T23:26:12,166 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T23:26:12,167 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69978207, jitterRate=0.042756542563438416}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-03T23:26:12,170 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733268372124Initializing all the Stores at 1733268372126 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733268372126Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733268372127 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733268372127Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733268372127Cleaning up temporary data from old regions at 1733268372157 (+30 ms)Region opened successfully at 1733268372170 (+13 ms) 2024-12-03T23:26:12,170 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): 
Closing 1588230740, disabling compactions & flushes 2024-12-03T23:26:12,170 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-03T23:26:12,170 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-03T23:26:12,170 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-03T23:26:12,170 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-03T23:26:12,172 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-03T23:26:12,172 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733268372170Disabling compacts and flushes for region at 1733268372170Disabling writes for close at 1733268372170Writing region close event to WAL at 1733268372171 (+1 ms)Closed at 1733268372172 (+1 ms) 2024-12-03T23:26:12,176 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T23:26:12,176 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-03T23:26:12,184 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-03T23:26:12,193 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-03T23:26:12,196 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-03T23:26:12,205 INFO [RS:2;ed8014902436:37351 {}] regionserver.HRegionServer(2659): reportForDuty to master=ed8014902436,43753,1733268369485 with port=37351, startcode=1733268370600 2024-12-03T23:26:12,205 INFO [RS:1;ed8014902436:41121 {}] regionserver.HRegionServer(2659): reportForDuty to master=ed8014902436,43753,1733268369485 with port=41121, startcode=1733268370525 2024-12-03T23:26:12,207 INFO [RS:0;ed8014902436:39085 {}] regionserver.HRegionServer(2659): reportForDuty to master=ed8014902436,43753,1733268369485 with port=39085, startcode=1733268370401 2024-12-03T23:26:12,207 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43753 {}] master.ServerManager(363): Checking decommissioned status of RegionServer ed8014902436,37351,1733268370600 2024-12-03T23:26:12,211 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43753 {}] master.ServerManager(517): Registering regionserver=ed8014902436,37351,1733268370600 2024-12-03T23:26:12,220 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43753 {}] master.ServerManager(363): Checking decommissioned status of RegionServer ed8014902436,39085,1733268370401 2024-12-03T23:26:12,221 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43753 {}] master.ServerManager(517): Registering regionserver=ed8014902436,39085,1733268370401 2024-12-03T23:26:12,221 DEBUG [RS:2;ed8014902436:37351 
{}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b 2024-12-03T23:26:12,221 DEBUG [RS:2;ed8014902436:37351 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37331 2024-12-03T23:26:12,221 DEBUG [RS:2;ed8014902436:37351 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-03T23:26:12,226 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43753 {}] master.ServerManager(363): Checking decommissioned status of RegionServer ed8014902436,41121,1733268370525 2024-12-03T23:26:12,227 DEBUG [RS:0;ed8014902436:39085 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b 2024-12-03T23:26:12,227 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43753 {}] master.ServerManager(517): Registering regionserver=ed8014902436,41121,1733268370525 2024-12-03T23:26:12,227 DEBUG [RS:0;ed8014902436:39085 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37331 2024-12-03T23:26:12,227 DEBUG [RS:0;ed8014902436:39085 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-03T23:26:12,230 DEBUG [RS:1;ed8014902436:41121 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b 2024-12-03T23:26:12,231 DEBUG [RS:1;ed8014902436:41121 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37331 2024-12-03T23:26:12,231 DEBUG [RS:1;ed8014902436:41121 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-03T23:26:12,251 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T23:26:12,306 DEBUG [RS:2;ed8014902436:37351 {}] zookeeper.ZKUtil(111): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/ed8014902436,37351,1733268370600 2024-12-03T23:26:12,306 WARN [RS:2;ed8014902436:37351 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-03T23:26:12,306 DEBUG [RS:0;ed8014902436:39085 {}] zookeeper.ZKUtil(111): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/ed8014902436,39085,1733268370401 2024-12-03T23:26:12,307 DEBUG [RS:1;ed8014902436:41121 {}] zookeeper.ZKUtil(111): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/ed8014902436,41121,1733268370525 2024-12-03T23:26:12,307 WARN [RS:0;ed8014902436:39085 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-03T23:26:12,307 INFO [RS:2;ed8014902436:37351 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-03T23:26:12,307 WARN [RS:1;ed8014902436:41121 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
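The split-policy entry earlier in the log (SteppingSplitPolicy wrapping ConstantSizeRegionSplitPolicy with desiredMaxFileSize=69978207 and jitterRate=0.042756542563438416) is consistent with a configured region max file size of 64 MB plus jitter. The 64 MB base is inferred from the numbers, not stated in the log; the sketch below only reproduces that back-calculation.

    // Back-calculation of the ConstantSizeRegionSplitPolicy numbers in the log:
    // desiredMaxFileSize = maxFileSize * (1 + jitterRate), with a 64 MB base
    // (67108864 bytes) inferred from the logged values.
    public class SplitSizeJitter {
        public static void main(String[] args) {
            long maxFileSize = 64L * 1024 * 1024;        // 67108864, inferred base
            double jitterRate = 0.042756542563438416;    // from the log
            long desired = (long) (maxFileSize * (1 + jitterRate));
            System.out.println(desired);                 // ~69978207, as logged
        }
    }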
2024-12-03T23:26:12,307 INFO [RS:0;ed8014902436:39085 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-03T23:26:12,307 INFO [RS:1;ed8014902436:41121 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-03T23:26:12,307 DEBUG [RS:2;ed8014902436:37351 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/WALs/ed8014902436,37351,1733268370600 2024-12-03T23:26:12,307 DEBUG [RS:0;ed8014902436:39085 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/WALs/ed8014902436,39085,1733268370401 2024-12-03T23:26:12,307 DEBUG [RS:1;ed8014902436:41121 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/WALs/ed8014902436,41121,1733268370525 2024-12-03T23:26:12,308 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [ed8014902436,41121,1733268370525] 2024-12-03T23:26:12,309 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [ed8014902436,39085,1733268370401] 2024-12-03T23:26:12,309 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [ed8014902436,37351,1733268370600] 2024-12-03T23:26:12,333 INFO [RS:0;ed8014902436:39085 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-03T23:26:12,333 INFO [RS:2;ed8014902436:37351 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-03T23:26:12,333 INFO [RS:1;ed8014902436:41121 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-03T23:26:12,346 INFO [RS:2;ed8014902436:37351 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-03T23:26:12,347 INFO [RS:0;ed8014902436:39085 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-03T23:26:12,347 INFO [RS:1;ed8014902436:41121 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-03T23:26:12,347 WARN [ed8014902436:43753 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-03T23:26:12,352 INFO [RS:1;ed8014902436:41121 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-03T23:26:12,352 INFO [RS:2;ed8014902436:37351 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-03T23:26:12,353 INFO [RS:1;ed8014902436:41121 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T23:26:12,353 INFO [RS:2;ed8014902436:37351 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
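The /hbase/rs entries above show the registration mechanism: each region server holds an ephemeral znode named after itself, and the master's RegionServerTracker watches the children of /hbase/rs, so NodeChildrenChanged events tell it when servers appear or vanish. A minimal ZooKeeper-client sketch of that idea, assuming the /hbase/rs parent already exists; the connect string and server name are values taken from the log, while the session timeout and watcher bodies are illustrative, and this is not the exact ZKUtil code path.

    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    // Sketch of ephemeral-node registration plus child watching, the mechanism
    // behind the /hbase/rs entries and NodeChildrenChanged events above.
    public class RsZnodeSketch {
        public static void main(String[] args) throws Exception {
            ZooKeeper zk = new ZooKeeper("127.0.0.1:54632", 30000, event -> {});
            // Ephemeral: the znode disappears when the region server's session dies.
            zk.create("/hbase/rs/ed8014902436,37351,1733268370600", new byte[0],
                      ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
            // The master side watches /hbase/rs children; a new child fires
            // NodeChildrenChanged, which is how the tracker learns of servers.
            zk.getChildren("/hbase/rs",
                           event -> System.out.println(event.getType()));
        }
    }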
2024-12-03T23:26:12,354 INFO [RS:2;ed8014902436:37351 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-03T23:26:12,354 INFO [RS:1;ed8014902436:41121 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-03T23:26:12,355 INFO [RS:0;ed8014902436:39085 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-03T23:26:12,355 INFO [RS:0;ed8014902436:39085 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T23:26:12,358 INFO [RS:0;ed8014902436:39085 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-03T23:26:12,361 INFO [RS:0;ed8014902436:39085 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-03T23:26:12,361 INFO [RS:1;ed8014902436:41121 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-03T23:26:12,361 INFO [RS:2;ed8014902436:37351 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-03T23:26:12,363 INFO [RS:1;ed8014902436:41121 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-03T23:26:12,363 INFO [RS:2;ed8014902436:37351 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-03T23:26:12,363 INFO [RS:0;ed8014902436:39085 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
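The MemStoreFlusher entries above (globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M) line up with the usual sizing factors, assuming the defaults hbase.regionserver.global.memstore.size = 0.4 and hbase.regionserver.global.memstore.size.lower.limit = 0.95; the 2200 MB heap figure below is likewise inferred from those factors rather than stated in the log.

    // Worked check of the MemStoreFlusher numbers, under the assumed defaults
    // 0.4 (global limit as a fraction of heap) and 0.95 (low-water mark).
    public class MemStoreLimits {
        public static void main(String[] args) {
            long heapMb = 2200;                              // implied heap if 0.4 applies
            long globalLimitMb = (long) (heapMb * 0.4);      // -> 880 M, as logged
            long lowMarkMb = (long) (globalLimitMb * 0.95);  // -> 836 M, as logged
            System.out.println(globalLimitMb + " / " + lowMarkMb);
        }
    }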
2024-12-03T23:26:12,363 DEBUG [RS:2;ed8014902436:37351 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/ed8014902436:0, corePoolSize=1, maxPoolSize=1 2024-12-03T23:26:12,363 DEBUG [RS:0;ed8014902436:39085 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/ed8014902436:0, corePoolSize=1, maxPoolSize=1 2024-12-03T23:26:12,363 DEBUG [RS:0;ed8014902436:39085 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/ed8014902436:0, corePoolSize=1, maxPoolSize=1 2024-12-03T23:26:12,363 DEBUG [RS:2;ed8014902436:37351 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/ed8014902436:0, corePoolSize=1, maxPoolSize=1 2024-12-03T23:26:12,363 DEBUG [RS:0;ed8014902436:39085 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/ed8014902436:0, corePoolSize=1, maxPoolSize=1 2024-12-03T23:26:12,363 DEBUG [RS:2;ed8014902436:37351 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/ed8014902436:0, corePoolSize=1, maxPoolSize=1 2024-12-03T23:26:12,363 DEBUG [RS:0;ed8014902436:39085 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/ed8014902436:0, corePoolSize=1, maxPoolSize=1 2024-12-03T23:26:12,363 DEBUG [RS:2;ed8014902436:37351 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/ed8014902436:0, corePoolSize=1, maxPoolSize=1 2024-12-03T23:26:12,364 DEBUG [RS:0;ed8014902436:39085 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/ed8014902436:0, corePoolSize=1, maxPoolSize=1 2024-12-03T23:26:12,364 DEBUG [RS:2;ed8014902436:37351 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/ed8014902436:0, corePoolSize=1, maxPoolSize=1 2024-12-03T23:26:12,364 DEBUG [RS:0;ed8014902436:39085 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/ed8014902436:0, corePoolSize=2, maxPoolSize=2 2024-12-03T23:26:12,364 DEBUG [RS:2;ed8014902436:37351 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/ed8014902436:0, corePoolSize=2, maxPoolSize=2 2024-12-03T23:26:12,364 DEBUG [RS:0;ed8014902436:39085 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/ed8014902436:0, corePoolSize=1, maxPoolSize=1 2024-12-03T23:26:12,364 DEBUG [RS:0;ed8014902436:39085 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/ed8014902436:0, corePoolSize=1, maxPoolSize=1 2024-12-03T23:26:12,364 DEBUG [RS:0;ed8014902436:39085 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/ed8014902436:0, corePoolSize=1, maxPoolSize=1 2024-12-03T23:26:12,364 DEBUG [RS:1;ed8014902436:41121 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/ed8014902436:0, corePoolSize=1, maxPoolSize=1 2024-12-03T23:26:12,364 DEBUG [RS:1;ed8014902436:41121 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/ed8014902436:0, corePoolSize=1, maxPoolSize=1 2024-12-03T23:26:12,365 DEBUG [RS:1;ed8014902436:41121 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/ed8014902436:0, corePoolSize=1, maxPoolSize=1 
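Each "Starting executor service name=..., corePoolSize=N, maxPoolSize=N" entry above corresponds to a named thread pool dedicated to one event type; with core and max sizes equal, the pool is effectively fixed-size. A plain-Java sketch of one such pool (RS_LOG_REPLAY_OPS with size 2, as logged); the 60-second keep-alive is an illustrative value and this is not the HBase ExecutorService class itself.

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    // Sketch of one named pool from the log, e.g. RS_LOG_REPLAY_OPS with
    // corePoolSize=2, maxPoolSize=2; core == max makes it fixed-size.
    public class RsExecutorSketch {
        public static void main(String[] args) {
            ThreadPoolExecutor pool = new ThreadPoolExecutor(
                2, 2,                    // corePoolSize, maxPoolSize as logged
                60, TimeUnit.SECONDS,    // keep-alive (illustrative value)
                new LinkedBlockingQueue<>(),
                r -> new Thread(r, "RS_LOG_REPLAY_OPS-regionserver/ed8014902436:0"));
            pool.submit(() -> System.out.println("log replay task"));
            pool.shutdown();
        }
    }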
2024-12-03T23:26:12,365 DEBUG [RS:1;ed8014902436:41121 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/ed8014902436:0, corePoolSize=1, maxPoolSize=1 2024-12-03T23:26:12,365 DEBUG [RS:1;ed8014902436:41121 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/ed8014902436:0, corePoolSize=1, maxPoolSize=1 2024-12-03T23:26:12,364 DEBUG [RS:2;ed8014902436:37351 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/ed8014902436:0, corePoolSize=1, maxPoolSize=1 2024-12-03T23:26:12,364 DEBUG [RS:0;ed8014902436:39085 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/ed8014902436:0, corePoolSize=1, maxPoolSize=1 2024-12-03T23:26:12,365 DEBUG [RS:1;ed8014902436:41121 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/ed8014902436:0, corePoolSize=2, maxPoolSize=2 2024-12-03T23:26:12,365 DEBUG [RS:2;ed8014902436:37351 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/ed8014902436:0, corePoolSize=1, maxPoolSize=1 2024-12-03T23:26:12,365 DEBUG [RS:0;ed8014902436:39085 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/ed8014902436:0, corePoolSize=1, maxPoolSize=1 2024-12-03T23:26:12,365 DEBUG [RS:1;ed8014902436:41121 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/ed8014902436:0, corePoolSize=1, maxPoolSize=1 2024-12-03T23:26:12,365 DEBUG [RS:2;ed8014902436:37351 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/ed8014902436:0, corePoolSize=1, maxPoolSize=1 2024-12-03T23:26:12,365 DEBUG [RS:0;ed8014902436:39085 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/ed8014902436:0, corePoolSize=1, maxPoolSize=1 2024-12-03T23:26:12,365 DEBUG [RS:1;ed8014902436:41121 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/ed8014902436:0, corePoolSize=1, maxPoolSize=1 2024-12-03T23:26:12,365 DEBUG [RS:2;ed8014902436:37351 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/ed8014902436:0, corePoolSize=1, maxPoolSize=1 2024-12-03T23:26:12,365 DEBUG [RS:1;ed8014902436:41121 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/ed8014902436:0, corePoolSize=1, maxPoolSize=1 2024-12-03T23:26:12,365 DEBUG [RS:0;ed8014902436:39085 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0, corePoolSize=3, maxPoolSize=3 2024-12-03T23:26:12,365 DEBUG [RS:2;ed8014902436:37351 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/ed8014902436:0, corePoolSize=1, maxPoolSize=1 2024-12-03T23:26:12,365 DEBUG [RS:1;ed8014902436:41121 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/ed8014902436:0, corePoolSize=1, maxPoolSize=1 2024-12-03T23:26:12,365 DEBUG [RS:0;ed8014902436:39085 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/ed8014902436:0, corePoolSize=3, maxPoolSize=3 2024-12-03T23:26:12,365 DEBUG [RS:2;ed8014902436:37351 {}] executor.ExecutorService(95): Starting executor 
service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/ed8014902436:0, corePoolSize=1, maxPoolSize=1 2024-12-03T23:26:12,365 DEBUG [RS:1;ed8014902436:41121 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/ed8014902436:0, corePoolSize=1, maxPoolSize=1 2024-12-03T23:26:12,366 DEBUG [RS:2;ed8014902436:37351 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0, corePoolSize=3, maxPoolSize=3 2024-12-03T23:26:12,366 DEBUG [RS:1;ed8014902436:41121 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/ed8014902436:0, corePoolSize=1, maxPoolSize=1 2024-12-03T23:26:12,366 DEBUG [RS:2;ed8014902436:37351 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/ed8014902436:0, corePoolSize=3, maxPoolSize=3 2024-12-03T23:26:12,366 DEBUG [RS:1;ed8014902436:41121 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0, corePoolSize=3, maxPoolSize=3 2024-12-03T23:26:12,366 DEBUG [RS:1;ed8014902436:41121 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/ed8014902436:0, corePoolSize=3, maxPoolSize=3 2024-12-03T23:26:12,379 INFO [RS:2;ed8014902436:37351 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-03T23:26:12,379 INFO [RS:2;ed8014902436:37351 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-03T23:26:12,379 INFO [RS:2;ed8014902436:37351 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T23:26:12,379 INFO [RS:2;ed8014902436:37351 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-03T23:26:12,379 INFO [RS:0;ed8014902436:39085 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-03T23:26:12,379 INFO [RS:2;ed8014902436:37351 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-03T23:26:12,379 INFO [RS:0;ed8014902436:39085 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-03T23:26:12,379 INFO [RS:2;ed8014902436:37351 {}] hbase.ChoreService(168): Chore ScheduledChore name=ed8014902436,37351,1733268370600-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-03T23:26:12,379 INFO [RS:0;ed8014902436:39085 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T23:26:12,380 INFO [RS:0;ed8014902436:39085 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-03T23:26:12,380 INFO [RS:0;ed8014902436:39085 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-03T23:26:12,380 INFO [RS:0;ed8014902436:39085 {}] hbase.ChoreService(168): Chore ScheduledChore name=ed8014902436,39085,1733268370401-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
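The chore periods above are logged in mixed units (most in MILLISECONDS, MobFileCleanerChore in SECONDS), and "runs every PT1S" earlier is an ISO-8601 duration. Normalizing them shows the intent: the compaction check runs every second, CompactedHFilesCleaner every 2 minutes, BrokenStoreFileCleaner every 6 hours, and MobFileCleanerChore daily. A small verification in plain Java:

    import java.time.Duration;
    import java.util.concurrent.TimeUnit;

    // Normalizing the chore periods quoted in the log to common units.
    public class ChorePeriods {
        public static void main(String[] args) {
            System.out.println(Duration.parse("PT1S").toMillis());         // 1000 ms (CompactionChecker)
            System.out.println(TimeUnit.MILLISECONDS.toMinutes(120_000));  // 2 min (CompactedHFilesCleaner)
            System.out.println(TimeUnit.MILLISECONDS.toHours(21_600_000)); // 6 h (BrokenStoreFileCleaner)
            System.out.println(TimeUnit.SECONDS.toHours(86_400));          // 24 h (MobFileCleanerChore)
        }
    }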
2024-12-03T23:26:12,393 INFO [RS:1;ed8014902436:41121 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-03T23:26:12,394 INFO [RS:1;ed8014902436:41121 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-03T23:26:12,394 INFO [RS:1;ed8014902436:41121 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T23:26:12,394 INFO [RS:1;ed8014902436:41121 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-03T23:26:12,394 INFO [RS:1;ed8014902436:41121 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-03T23:26:12,394 INFO [RS:1;ed8014902436:41121 {}] hbase.ChoreService(168): Chore ScheduledChore name=ed8014902436,41121,1733268370525-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-03T23:26:12,406 INFO [RS:2;ed8014902436:37351 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-03T23:26:12,409 INFO [RS:2;ed8014902436:37351 {}] hbase.ChoreService(168): Chore ScheduledChore name=ed8014902436,37351,1733268370600-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T23:26:12,409 INFO [RS:2;ed8014902436:37351 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T23:26:12,409 INFO [RS:2;ed8014902436:37351 {}] regionserver.Replication(171): ed8014902436,37351,1733268370600 started 2024-12-03T23:26:12,411 INFO [RS:0;ed8014902436:39085 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-03T23:26:12,411 INFO [RS:0;ed8014902436:39085 {}] hbase.ChoreService(168): Chore ScheduledChore name=ed8014902436,39085,1733268370401-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T23:26:12,411 INFO [RS:0;ed8014902436:39085 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T23:26:12,412 INFO [RS:0;ed8014902436:39085 {}] regionserver.Replication(171): ed8014902436,39085,1733268370401 started 2024-12-03T23:26:12,416 INFO [RS:1;ed8014902436:41121 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-03T23:26:12,416 INFO [RS:1;ed8014902436:41121 {}] hbase.ChoreService(168): Chore ScheduledChore name=ed8014902436,41121,1733268370525-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T23:26:12,416 INFO [RS:1;ed8014902436:41121 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T23:26:12,416 INFO [RS:1;ed8014902436:41121 {}] regionserver.Replication(171): ed8014902436,41121,1733268370525 started 2024-12-03T23:26:12,428 INFO [RS:2;ed8014902436:37351 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
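Chores such as MemstoreFlusherChore (period=1000 ms) or HeapMemoryTunerChore (period=60000 ms) above are periodic tasks registered on a shared scheduler. A plain-Java analogue using ScheduledExecutorService is sketched below; this is only an illustration of the scheduling pattern, not HBase's ChoreService API.

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    // Plain-Java analogue of a periodic chore such as MemstoreFlusherChore.
    public class ChoreSchedulerSketch {
        public static void main(String[] args) {
            ScheduledExecutorService ses = Executors.newSingleThreadScheduledExecutor();
            ses.scheduleAtFixedRate(
                () -> System.out.println("memstore flush check"),
                0, 1000, TimeUnit.MILLISECONDS);   // period=1000 ms, as logged
        }
    }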
2024-12-03T23:26:12,428 INFO [RS:2;ed8014902436:37351 {}] regionserver.HRegionServer(1482): Serving as ed8014902436,37351,1733268370600, RpcServer on ed8014902436/172.17.0.2:37351, sessionid=0x1019ecdff490003 2024-12-03T23:26:12,429 DEBUG [RS:2;ed8014902436:37351 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-03T23:26:12,430 DEBUG [RS:2;ed8014902436:37351 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager ed8014902436,37351,1733268370600 2024-12-03T23:26:12,430 DEBUG [RS:2;ed8014902436:37351 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ed8014902436,37351,1733268370600' 2024-12-03T23:26:12,430 DEBUG [RS:2;ed8014902436:37351 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-03T23:26:12,431 DEBUG [RS:2;ed8014902436:37351 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-03T23:26:12,432 DEBUG [RS:2;ed8014902436:37351 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-03T23:26:12,432 DEBUG [RS:2;ed8014902436:37351 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-03T23:26:12,432 DEBUG [RS:2;ed8014902436:37351 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager ed8014902436,37351,1733268370600 2024-12-03T23:26:12,432 DEBUG [RS:2;ed8014902436:37351 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ed8014902436,37351,1733268370600' 2024-12-03T23:26:12,433 INFO [RS:0;ed8014902436:39085 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-03T23:26:12,434 DEBUG [RS:2;ed8014902436:37351 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-03T23:26:12,434 INFO [RS:0;ed8014902436:39085 {}] regionserver.HRegionServer(1482): Serving as ed8014902436,39085,1733268370401, RpcServer on ed8014902436/172.17.0.2:39085, sessionid=0x1019ecdff490001 2024-12-03T23:26:12,434 DEBUG [RS:0;ed8014902436:39085 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-03T23:26:12,434 DEBUG [RS:0;ed8014902436:39085 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager ed8014902436,39085,1733268370401 2024-12-03T23:26:12,435 DEBUG [RS:0;ed8014902436:39085 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ed8014902436,39085,1733268370401' 2024-12-03T23:26:12,435 DEBUG [RS:0;ed8014902436:39085 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-03T23:26:12,435 DEBUG [RS:2;ed8014902436:37351 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-03T23:26:12,436 DEBUG [RS:0;ed8014902436:39085 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-03T23:26:12,436 DEBUG [RS:2;ed8014902436:37351 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-03T23:26:12,436 INFO [RS:2;ed8014902436:37351 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-03T23:26:12,436 INFO [RS:2;ed8014902436:37351 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-03T23:26:12,436 DEBUG [RS:0;ed8014902436:39085 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-03T23:26:12,436 DEBUG [RS:0;ed8014902436:39085 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-03T23:26:12,436 DEBUG [RS:0;ed8014902436:39085 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager ed8014902436,39085,1733268370401 2024-12-03T23:26:12,436 DEBUG [RS:0;ed8014902436:39085 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ed8014902436,39085,1733268370401' 2024-12-03T23:26:12,436 DEBUG [RS:0;ed8014902436:39085 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-03T23:26:12,437 INFO [RS:1;ed8014902436:41121 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-03T23:26:12,437 DEBUG [RS:0;ed8014902436:39085 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-03T23:26:12,437 INFO [RS:1;ed8014902436:41121 {}] regionserver.HRegionServer(1482): Serving as ed8014902436,41121,1733268370525, RpcServer on ed8014902436/172.17.0.2:41121, sessionid=0x1019ecdff490002 2024-12-03T23:26:12,438 DEBUG [RS:1;ed8014902436:41121 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-03T23:26:12,438 DEBUG [RS:1;ed8014902436:41121 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager ed8014902436,41121,1733268370525 2024-12-03T23:26:12,438 DEBUG [RS:1;ed8014902436:41121 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ed8014902436,41121,1733268370525' 2024-12-03T23:26:12,438 DEBUG [RS:1;ed8014902436:41121 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-03T23:26:12,438 DEBUG [RS:0;ed8014902436:39085 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-03T23:26:12,438 INFO [RS:0;ed8014902436:39085 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-03T23:26:12,438 INFO [RS:0;ed8014902436:39085 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-03T23:26:12,438 DEBUG [RS:1;ed8014902436:41121 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-03T23:26:12,439 DEBUG [RS:1;ed8014902436:41121 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-03T23:26:12,439 DEBUG [RS:1;ed8014902436:41121 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-03T23:26:12,439 DEBUG [RS:1;ed8014902436:41121 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager ed8014902436,41121,1733268370525 2024-12-03T23:26:12,439 DEBUG [RS:1;ed8014902436:41121 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ed8014902436,41121,1733268370525' 2024-12-03T23:26:12,439 DEBUG [RS:1;ed8014902436:41121 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-03T23:26:12,439 DEBUG [RS:1;ed8014902436:41121 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-03T23:26:12,440 DEBUG [RS:1;ed8014902436:41121 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-03T23:26:12,440 INFO [RS:1;ed8014902436:41121 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-03T23:26:12,440 INFO [RS:1;ed8014902436:41121 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
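The server identifiers used throughout these entries ("Serving as ed8014902436,41121,1733268370525, RpcServer on ...") follow the hostname,port,startcode convention, where the startcode is the server's start timestamp in milliseconds. A plain-Java parse of that format is sketched below; HBase itself models this with its ServerName class, so the helper here is only illustrative.

    // Splitting an HBase-style server name of the form hostname,port,startcode.
    public class ServerNameParse {
        public static void main(String[] args) {
            String[] parts = "ed8014902436,41121,1733268370525".split(",");
            String host = parts[0];
            int port = Integer.parseInt(parts[1]);        // 41121, the RPC port
            long startcode = Long.parseLong(parts[2]);    // 1733268370525, start time (ms)
            System.out.println(host + ":" + port + " started at " + startcode);
        }
    }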
2024-12-03T23:26:12,543 INFO [RS:0;ed8014902436:39085 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-03T23:26:12,543 INFO [RS:1;ed8014902436:41121 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-03T23:26:12,547 INFO [RS:0;ed8014902436:39085 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ed8014902436%2C39085%2C1733268370401, suffix=, logDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/WALs/ed8014902436,39085,1733268370401, archiveDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/oldWALs, maxLogs=32 2024-12-03T23:26:12,547 INFO [RS:1;ed8014902436:41121 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ed8014902436%2C41121%2C1733268370525, suffix=, logDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/WALs/ed8014902436,41121,1733268370525, archiveDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/oldWALs, maxLogs=32 2024-12-03T23:26:12,548 INFO [RS:2;ed8014902436:37351 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-03T23:26:12,553 INFO [RS:2;ed8014902436:37351 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ed8014902436%2C37351%2C1733268370600, suffix=, logDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/WALs/ed8014902436,37351,1733268370600, archiveDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/oldWALs, maxLogs=32 2024-12-03T23:26:12,576 DEBUG [RS:2;ed8014902436:37351 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/WALs/ed8014902436,37351,1733268370600/ed8014902436%2C37351%2C1733268370600.1733268372555, exclude list is [], retry=0 2024-12-03T23:26:12,581 DEBUG [RS:1;ed8014902436:41121 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/WALs/ed8014902436,41121,1733268370525/ed8014902436%2C41121%2C1733268370525.1733268372551, exclude list is [], retry=0 2024-12-03T23:26:12,587 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37061,DS-7ecaa6af-295d-44fe-bbc0-1dd5c185add7,DISK] 2024-12-03T23:26:12,587 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45743,DS-d6837643-beec-4f47-b7c3-1e2856818a07,DISK] 2024-12-03T23:26:12,587 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39281,DS-b561270c-8419-428c-8d66-8090ccbab66f,DISK] 2024-12-03T23:26:12,594 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = 
DatanodeInfoWithStorage[127.0.0.1:39281,DS-b561270c-8419-428c-8d66-8090ccbab66f,DISK] 2024-12-03T23:26:12,596 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45743,DS-d6837643-beec-4f47-b7c3-1e2856818a07,DISK] 2024-12-03T23:26:12,596 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37061,DS-7ecaa6af-295d-44fe-bbc0-1dd5c185add7,DISK] 2024-12-03T23:26:12,600 DEBUG [RS:0;ed8014902436:39085 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/WALs/ed8014902436,39085,1733268370401/ed8014902436%2C39085%2C1733268370401.1733268372552, exclude list is [], retry=0 2024-12-03T23:26:12,649 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39281,DS-b561270c-8419-428c-8d66-8090ccbab66f,DISK] 2024-12-03T23:26:12,649 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45743,DS-d6837643-beec-4f47-b7c3-1e2856818a07,DISK] 2024-12-03T23:26:12,649 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37061,DS-7ecaa6af-295d-44fe-bbc0-1dd5c185add7,DISK] 2024-12-03T23:26:12,659 INFO [RS:2;ed8014902436:37351 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/WALs/ed8014902436,37351,1733268370600/ed8014902436%2C37351%2C1733268370600.1733268372555 2024-12-03T23:26:12,663 INFO [RS:1;ed8014902436:41121 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/WALs/ed8014902436,41121,1733268370525/ed8014902436%2C41121%2C1733268370525.1733268372551 2024-12-03T23:26:12,665 DEBUG [RS:2;ed8014902436:37351 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39523:39523),(127.0.0.1/127.0.0.1:42495:42495),(127.0.0.1/127.0.0.1:34137:34137)] 2024-12-03T23:26:12,673 DEBUG [RS:1;ed8014902436:41121 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42495:42495),(127.0.0.1/127.0.0.1:34137:34137),(127.0.0.1/127.0.0.1:39523:39523)] 2024-12-03T23:26:12,674 INFO [RS:0;ed8014902436:39085 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/WALs/ed8014902436,39085,1733268370401/ed8014902436%2C39085%2C1733268370401.1733268372552 2024-12-03T23:26:12,675 DEBUG [RS:0;ed8014902436:39085 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42495:42495),(127.0.0.1/127.0.0.1:34137:34137),(127.0.0.1/127.0.0.1:39523:39523)] 2024-12-03T23:26:12,850 DEBUG [ed8014902436:43753 {}] assignment.AssignmentManager(2472): Processing 
assignQueue; systemServersCount=3, allServersCount=3 2024-12-03T23:26:12,857 DEBUG [ed8014902436:43753 {}] balancer.BalancerClusterState(204): Hosts are {ed8014902436=0} racks are {/default-rack=0} 2024-12-03T23:26:12,863 DEBUG [ed8014902436:43753 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-03T23:26:12,863 DEBUG [ed8014902436:43753 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-03T23:26:12,863 DEBUG [ed8014902436:43753 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-03T23:26:12,863 DEBUG [ed8014902436:43753 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-03T23:26:12,863 DEBUG [ed8014902436:43753 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-03T23:26:12,863 DEBUG [ed8014902436:43753 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-03T23:26:12,863 INFO [ed8014902436:43753 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-03T23:26:12,863 INFO [ed8014902436:43753 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-03T23:26:12,863 INFO [ed8014902436:43753 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-03T23:26:12,863 DEBUG [ed8014902436:43753 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-03T23:26:12,872 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=ed8014902436,37351,1733268370600 2024-12-03T23:26:12,881 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as ed8014902436,37351,1733268370600, state=OPENING 2024-12-03T23:26:12,902 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-03T23:26:12,910 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T23:26:12,910 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T23:26:12,910 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T23:26:12,910 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T23:26:12,911 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T23:26:12,912 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T23:26:12,912 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T23:26:12,912 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T23:26:12,915 DEBUG 
[PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-03T23:26:12,918 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=ed8014902436,37351,1733268370600}] 2024-12-03T23:26:13,095 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-03T23:26:13,097 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55837, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-03T23:26:13,113 INFO [RS_OPEN_META-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-03T23:26:13,113 INFO [RS_OPEN_META-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-03T23:26:13,114 INFO [RS_OPEN_META-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-03T23:26:13,117 INFO [RS_OPEN_META-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ed8014902436%2C37351%2C1733268370600.meta, suffix=.meta, logDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/WALs/ed8014902436,37351,1733268370600, archiveDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/oldWALs, maxLogs=32 2024-12-03T23:26:13,139 DEBUG [RS_OPEN_META-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/WALs/ed8014902436,37351,1733268370600/ed8014902436%2C37351%2C1733268370600.meta.1733268373120.meta, exclude list is [], retry=0 2024-12-03T23:26:13,147 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39281,DS-b561270c-8419-428c-8d66-8090ccbab66f,DISK] 2024-12-03T23:26:13,148 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37061,DS-7ecaa6af-295d-44fe-bbc0-1dd5c185add7,DISK] 2024-12-03T23:26:13,149 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45743,DS-d6837643-beec-4f47-b7c3-1e2856818a07,DISK] 2024-12-03T23:26:13,158 INFO [RS_OPEN_META-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL 
/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/WALs/ed8014902436,37351,1733268370600/ed8014902436%2C37351%2C1733268370600.meta.1733268373120.meta 2024-12-03T23:26:13,158 DEBUG [RS_OPEN_META-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42495:42495),(127.0.0.1/127.0.0.1:34137:34137),(127.0.0.1/127.0.0.1:39523:39523)] 2024-12-03T23:26:13,159 DEBUG [RS_OPEN_META-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-03T23:26:13,160 DEBUG [RS_OPEN_META-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=AccessControlService 2024-12-03T23:26:13,161 INFO [RS_OPEN_META-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-03T23:26:13,162 DEBUG [RS_OPEN_META-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-03T23:26:13,163 DEBUG [RS_OPEN_META-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-03T23:26:13,164 INFO [RS_OPEN_META-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-03T23:26:13,172 DEBUG [RS_OPEN_META-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-03T23:26:13,173 DEBUG [RS_OPEN_META-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T23:26:13,173 DEBUG [RS_OPEN_META-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-03T23:26:13,173 DEBUG [RS_OPEN_META-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-03T23:26:13,176 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-03T23:26:13,178 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-03T23:26:13,178 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:26:13,180 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T23:26:13,180 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-03T23:26:13,182 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-03T23:26:13,182 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:26:13,183 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T23:26:13,183 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-03T23:26:13,185 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-03T23:26:13,185 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:26:13,187 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T23:26:13,187 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-03T23:26:13,189 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-03T23:26:13,189 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:26:13,191 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-03T23:26:13,191 DEBUG [RS_OPEN_META-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-03T23:26:13,199 DEBUG [RS_OPEN_META-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/hbase/meta/1588230740 2024-12-03T23:26:13,204 DEBUG [RS_OPEN_META-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/hbase/meta/1588230740 2024-12-03T23:26:13,207 DEBUG [RS_OPEN_META-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-03T23:26:13,207 DEBUG [RS_OPEN_META-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-03T23:26:13,209 DEBUG [RS_OPEN_META-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-03T23:26:13,213 DEBUG [RS_OPEN_META-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-03T23:26:13,215 INFO [RS_OPEN_META-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69824436, jitterRate=0.04046517610549927}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-03T23:26:13,215 DEBUG [RS_OPEN_META-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-03T23:26:13,219 DEBUG [RS_OPEN_META-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733268373174Writing region info on filesystem at 1733268373174Initializing all the Stores at 1733268373176 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733268373176Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733268373176Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733268373176Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733268373176Cleaning up temporary data from old regions at 1733268373207 (+31 ms)Running coprocessor post-open hooks at 1733268373216 (+9 ms)Region opened successfully at 1733268373219 (+3 ms) 2024-12-03T23:26:13,225 INFO [RS_OPEN_META-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733268373088 2024-12-03T23:26:13,237 DEBUG [RS_OPEN_META-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-03T23:26:13,238 INFO [RS_OPEN_META-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-03T23:26:13,240 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=ed8014902436,37351,1733268370600 2024-12-03T23:26:13,243 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as ed8014902436,37351,1733268370600, state=OPEN 2024-12-03T23:26:13,267 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-03T23:26:13,267 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-03T23:26:13,267 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-03T23:26:13,267 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-03T23:26:13,268 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T23:26:13,268 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T23:26:13,268 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T23:26:13,268 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T23:26:13,269 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, 
hasLock=true; OpenRegionProcedure 1588230740, server=ed8014902436,37351,1733268370600 2024-12-03T23:26:13,274 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-03T23:26:13,274 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=ed8014902436,37351,1733268370600 in 352 msec 2024-12-03T23:26:13,300 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T23:26:13,300 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-03T23:26:13,302 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-03T23:26:13,302 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 1.0930 sec 2024-12-03T23:26:13,346 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T23:26:13,348 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ed8014902436,37351,1733268370600, seqNum=-1] 2024-12-03T23:26:13,367 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T23:26:13,370 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42747, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T23:26:13,395 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.5050 sec 2024-12-03T23:26:13,395 INFO [master/ed8014902436:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733268373395, completionTime=-1 2024-12-03T23:26:13,398 INFO [master/ed8014902436:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-03T23:26:13,398 DEBUG [master/ed8014902436:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
2024-12-03T23:26:13,438 INFO [master/ed8014902436:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-03T23:26:13,438 INFO [master/ed8014902436:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733268433438 2024-12-03T23:26:13,438 INFO [master/ed8014902436:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733268493438 2024-12-03T23:26:13,438 INFO [master/ed8014902436:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 39 msec 2024-12-03T23:26:13,443 DEBUG [master/ed8014902436:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-03T23:26:13,452 INFO [master/ed8014902436:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ed8014902436,43753,1733268369485-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T23:26:13,453 INFO [master/ed8014902436:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ed8014902436,43753,1733268369485-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T23:26:13,453 INFO [master/ed8014902436:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ed8014902436,43753,1733268369485-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T23:26:13,454 INFO [master/ed8014902436:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-ed8014902436:43753, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T23:26:13,455 INFO [master/ed8014902436:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-03T23:26:13,455 INFO [master/ed8014902436:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-03T23:26:13,462 DEBUG [master/ed8014902436:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-03T23:26:13,488 INFO [master/ed8014902436:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.726sec 2024-12-03T23:26:13,490 INFO [master/ed8014902436:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-03T23:26:13,491 INFO [master/ed8014902436:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-03T23:26:13,492 INFO [master/ed8014902436:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-03T23:26:13,493 INFO [master/ed8014902436:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-12-03T23:26:13,493 INFO [master/ed8014902436:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-03T23:26:13,494 INFO [master/ed8014902436:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ed8014902436,43753,1733268369485-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-03T23:26:13,495 INFO [master/ed8014902436:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ed8014902436,43753,1733268369485-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-03T23:26:13,521 DEBUG [master/ed8014902436:0:becomeActiveMaster {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-03T23:26:13,522 DEBUG [master/ed8014902436:0:becomeActiveMaster {}] client.AsyncConnectionImpl(321): The fetched master address is ed8014902436,43753,1733268369485 2024-12-03T23:26:13,525 DEBUG [master/ed8014902436:0:becomeActiveMaster {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@52b5497b 2024-12-03T23:26:13,526 DEBUG [master/ed8014902436:0:becomeActiveMaster {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-03T23:26:13,528 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39073, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-03T23:26:13,534 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43753 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'hbase:acl', {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-03T23:26:13,542 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43753 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=hbase:acl 2024-12-03T23:26:13,546 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_PRE_OPERATION 2024-12-03T23:26:13,547 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:26:13,548 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43753 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "hbase" qualifier: "acl" procId is: 4 2024-12-03T23:26:13,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-03T23:26:13,557 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-03T23:26:13,599 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1d574083, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:26:13,606 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-03T23:26:13,606 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-03T23:26:13,611 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request ed8014902436,43753,-1 for getting cluster id 2024-12-03T23:26:13,614 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T23:26:13,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741837_1013 (size=349) 2024-12-03T23:26:13,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741837_1013 (size=349) 2024-12-03T23:26:13,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741837_1013 (size=349) 2024-12-03T23:26:13,634 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 94ece278d222af9bae70064026dc24ae, NAME => 'hbase:acl,,1733268373530.94ece278d222af9bae70064026dc24ae.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:acl', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b 2024-12-03T23:26:13,636 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '365bf15c-9cbe-4838-a767-ac7274dcdbe4' 2024-12-03T23:26:13,640 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T23:26:13,641 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "365bf15c-9cbe-4838-a767-ac7274dcdbe4" 2024-12-03T23:26:13,641 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7d846802, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:26:13,641 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [ed8014902436,43753,-1] 2024-12-03T23:26:13,646 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T23:26:13,658 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:26:13,659 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45968, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T23:26:13,662 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): 
Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@cbd5cc9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:26:13,663 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T23:26:13,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741838_1014 (size=36) 2024-12-03T23:26:13,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741838_1014 (size=36) 2024-12-03T23:26:13,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741838_1014 (size=36) 2024-12-03T23:26:13,672 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(898): Instantiated hbase:acl,,1733268373530.94ece278d222af9bae70064026dc24ae.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T23:26:13,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-03T23:26:13,672 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1722): Closing 94ece278d222af9bae70064026dc24ae, disabling compactions & flushes 2024-12-03T23:26:13,672 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1755): Closing region hbase:acl,,1733268373530.94ece278d222af9bae70064026dc24ae. 2024-12-03T23:26:13,672 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:acl,,1733268373530.94ece278d222af9bae70064026dc24ae. 2024-12-03T23:26:13,673 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on hbase:acl,,1733268373530.94ece278d222af9bae70064026dc24ae. after waiting 0 ms 2024-12-03T23:26:13,673 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region hbase:acl,,1733268373530.94ece278d222af9bae70064026dc24ae. 2024-12-03T23:26:13,673 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1973): Closed hbase:acl,,1733268373530.94ece278d222af9bae70064026dc24ae. 
2024-12-03T23:26:13,673 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1676): Region close journal for 94ece278d222af9bae70064026dc24ae: Waiting for close lock at 1733268373672Disabling compacts and flushes for region at 1733268373672Disabling writes for close at 1733268373673 (+1 ms)Writing region close event to WAL at 1733268373673Closed at 1733268373673 2024-12-03T23:26:13,676 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ed8014902436,37351,1733268370600, seqNum=-1] 2024-12-03T23:26:13,677 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T23:26:13,677 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_ADD_TO_META 2024-12-03T23:26:13,685 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"hbase:acl,,1733268373530.94ece278d222af9bae70064026dc24ae.","families":{"info":[{"qualifier":"regioninfo","vlen":35,"tag":[],"timestamp":"1733268373678"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733268373678"}]},"ts":"1733268373678"} 2024-12-03T23:26:13,688 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51870, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T23:26:13,692 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=ed8014902436,43753,1733268369485 2024-12-03T23:26:13,692 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2305): Starting mini mapreduce cluster... 2024-12-03T23:26:13,692 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/test.cache.data in system properties and HBase conf 2024-12-03T23:26:13,692 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/hadoop.tmp.dir in system properties and HBase conf 2024-12-03T23:26:13,692 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/hadoop.log.dir in system properties and HBase conf 2024-12-03T23:26:13,693 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-03T23:26:13,693 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-12-03T23:26:13,693 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-03T23:26:13,693 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-03T23:26:13,693 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-03T23:26:13,693 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-03T23:26:13,693 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-03T23:26:13,693 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-03T23:26:13,693 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-03T23:26:13,694 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-03T23:26:13,694 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-03T23:26:13,694 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-03T23:26:13,694 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-03T23:26:13,694 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting 
nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/nfs.dump.dir in system properties and HBase conf 2024-12-03T23:26:13,694 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/java.io.tmpdir in system properties and HBase conf 2024-12-03T23:26:13,694 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-03T23:26:13,694 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-03T23:26:13,694 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-03T23:26:13,697 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-03T23:26:13,702 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:acl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733268373697"}]},"ts":"1733268373697"} 2024-12-03T23:26:13,712 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:acl, state=ENABLING in hbase:meta 2024-12-03T23:26:13,713 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {ed8014902436=0} racks are {/default-rack=0} 2024-12-03T23:26:13,717 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-03T23:26:13,717 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-03T23:26:13,717 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-03T23:26:13,717 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-03T23:26:13,717 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-03T23:26:13,717 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-03T23:26:13,717 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-03T23:26:13,717 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-03T23:26:13,717 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-03T23:26:13,717 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-03T23:26:13,719 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:acl, 
region=94ece278d222af9bae70064026dc24ae, ASSIGN}] 2024-12-03T23:26:13,724 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=94ece278d222af9bae70064026dc24ae, ASSIGN 2024-12-03T23:26:13,728 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:acl, region=94ece278d222af9bae70064026dc24ae, ASSIGN; state=OFFLINE, location=ed8014902436,37351,1733268370600; forceNewPlan=false, retain=false 2024-12-03T23:26:13,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741839_1015 (size=592039) 2024-12-03T23:26:13,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741839_1015 (size=592039) 2024-12-03T23:26:13,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741839_1015 (size=592039) 2024-12-03T23:26:13,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741840_1016 (size=1663647) 2024-12-03T23:26:13,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741840_1016 (size=1663647) 2024-12-03T23:26:13,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741840_1016 (size=1663647) 2024-12-03T23:26:13,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-03T23:26:13,881 INFO [ed8014902436:43753 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-03T23:26:13,883 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=94ece278d222af9bae70064026dc24ae, regionState=OPENING, regionLocation=ed8014902436,37351,1733268370600 2024-12-03T23:26:13,889 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:acl, region=94ece278d222af9bae70064026dc24ae, ASSIGN because future has completed 2024-12-03T23:26:13,890 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 94ece278d222af9bae70064026dc24ae, server=ed8014902436,37351,1733268370600}] 2024-12-03T23:26:14,092 INFO [RS_OPEN_PRIORITY_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(132): Open hbase:acl,,1733268373530.94ece278d222af9bae70064026dc24ae. 
2024-12-03T23:26:14,093 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 94ece278d222af9bae70064026dc24ae, NAME => 'hbase:acl,,1733268373530.94ece278d222af9bae70064026dc24ae.', STARTKEY => '', ENDKEY => ''} 2024-12-03T23:26:14,093 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:acl,,1733268373530.94ece278d222af9bae70064026dc24ae. service=AccessControlService 2024-12-03T23:26:14,094 INFO [RS_OPEN_PRIORITY_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-03T23:26:14,094 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table acl 94ece278d222af9bae70064026dc24ae 2024-12-03T23:26:14,096 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(898): Instantiated hbase:acl,,1733268373530.94ece278d222af9bae70064026dc24ae.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T23:26:14,106 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 94ece278d222af9bae70064026dc24ae 2024-12-03T23:26:14,106 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 94ece278d222af9bae70064026dc24ae 2024-12-03T23:26:14,110 INFO [StoreOpener-94ece278d222af9bae70064026dc24ae-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family l of region 94ece278d222af9bae70064026dc24ae 2024-12-03T23:26:14,113 INFO [StoreOpener-94ece278d222af9bae70064026dc24ae-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 94ece278d222af9bae70064026dc24ae columnFamilyName l 2024-12-03T23:26:14,113 DEBUG [StoreOpener-94ece278d222af9bae70064026dc24ae-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:26:14,115 INFO [StoreOpener-94ece278d222af9bae70064026dc24ae-1 {}] regionserver.HStore(327): Store=94ece278d222af9bae70064026dc24ae/l, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T23:26:14,115 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 94ece278d222af9bae70064026dc24ae 2024-12-03T23:26:14,119 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/hbase/acl/94ece278d222af9bae70064026dc24ae 2024-12-03T23:26:14,120 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/hbase/acl/94ece278d222af9bae70064026dc24ae 2024-12-03T23:26:14,121 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 94ece278d222af9bae70064026dc24ae 2024-12-03T23:26:14,121 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 94ece278d222af9bae70064026dc24ae 2024-12-03T23:26:14,129 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 94ece278d222af9bae70064026dc24ae 2024-12-03T23:26:14,134 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/hbase/acl/94ece278d222af9bae70064026dc24ae/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T23:26:14,135 INFO [RS_OPEN_PRIORITY_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1114): Opened 94ece278d222af9bae70064026dc24ae; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73516552, jitterRate=0.0954819917678833}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-03T23:26:14,135 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 94ece278d222af9bae70064026dc24ae 2024-12-03T23:26:14,146 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 94ece278d222af9bae70064026dc24ae: Running coprocessor pre-open hook at 1733268374106Writing region info on filesystem at 1733268374107 (+1 ms)Initializing all the Stores at 1733268374109 (+2 ms)Instantiating store for column family {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733268374109Cleaning up temporary data from old regions at 1733268374121 (+12 ms)Running coprocessor post-open hooks at 1733268374135 (+14 ms)Region opened successfully at 1733268374146 (+11 ms) 
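The CoprocessorHost and AccessControlService entries above show why the master creates hbase:acl at all: the AccessController system coprocessor is installed, and it keeps its permission data in that table. A minimal sketch of the configuration that enables it, written as programmatic Configuration settings against the standard hbase-site.xml keys (an assumption for illustration; the test harness may wire this up through its own helpers):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class AccessControllerSetup {
      public static Configuration secureConf() {
        Configuration conf = HBaseConfiguration.create();
        // Turn on authorization checks; without this the AccessController is inert.
        conf.setBoolean("hbase.security.authorization", true);
        // Install the coprocessor on master, regions and region servers, matching the
        // "System coprocessor ... AccessController loaded" entries above.
        String ac = "org.apache.hadoop.hbase.security.access.AccessController";
        conf.set("hbase.coprocessor.master.classes", ac);
        conf.set("hbase.coprocessor.region.classes", ac);
        conf.set("hbase.coprocessor.regionserver.classes", ac);
        return conf;
      }
    }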
2024-12-03T23:26:14,149 INFO [RS_OPEN_PRIORITY_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:acl,,1733268373530.94ece278d222af9bae70064026dc24ae., pid=6, masterSystemTime=1733268374064 2024-12-03T23:26:14,155 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:acl,,1733268373530.94ece278d222af9bae70064026dc24ae. 2024-12-03T23:26:14,156 INFO [RS_OPEN_PRIORITY_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(153): Opened hbase:acl,,1733268373530.94ece278d222af9bae70064026dc24ae. 2024-12-03T23:26:14,168 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=94ece278d222af9bae70064026dc24ae, regionState=OPEN, openSeqNum=2, regionLocation=ed8014902436,37351,1733268370600 2024-12-03T23:26:14,178 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 94ece278d222af9bae70064026dc24ae, server=ed8014902436,37351,1733268370600 because future has completed 2024-12-03T23:26:14,184 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43753 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=OPEN, location=ed8014902436,37351,1733268370600, table=hbase:acl, region=94ece278d222af9bae70064026dc24ae. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-12-03T23:26:14,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-03T23:26:14,201 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-03T23:26:14,201 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 94ece278d222af9bae70064026dc24ae, server=ed8014902436,37351,1733268370600 in 297 msec 2024-12-03T23:26:14,208 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-03T23:26:14,208 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=94ece278d222af9bae70064026dc24ae, ASSIGN in 482 msec 2024-12-03T23:26:14,214 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-03T23:26:14,214 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:acl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733268374214"}]},"ts":"1733268374214"} 2024-12-03T23:26:14,219 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:acl, state=ENABLED in hbase:meta 2024-12-03T23:26:14,223 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_POST_OPERATION 2024-12-03T23:26:14,234 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, 
state=SUCCESS, hasLock=false; CreateTableProcedure table=hbase:acl in 685 msec 2024-12-03T23:26:14,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-03T23:26:14,701 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: hbase:acl completed 2024-12-03T23:26:14,707 DEBUG [master/ed8014902436:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-03T23:26:14,708 INFO [master/ed8014902436:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-03T23:26:14,711 INFO [master/ed8014902436:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ed8014902436,43753,1733268369485-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T23:26:15,504 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T23:26:15,611 WARN [Thread-383 {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T23:26:15,805 INFO [Thread-383 {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T23:26:15,805 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-12-03T23:26:15,806 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T23:26:15,838 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T23:26:15,838 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T23:26:15,838 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-03T23:26:15,841 INFO [Thread-383 {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T23:26:15,841 INFO [Thread-383 {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T23:26:15,841 INFO [Thread-383 {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-03T23:26:15,842 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T23:26:15,842 INFO [Thread-383 {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4dc5d503{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/hadoop.log.dir/,AVAILABLE} 2024-12-03T23:26:15,843 INFO [Thread-383 {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@230c0589{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-03T23:26:15,861 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@534bb42e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/hadoop.log.dir/,AVAILABLE} 2024-12-03T23:26:15,861 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6d2a5e21{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-03T23:26:16,013 INFO [Thread-383 {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.resourcemanager.webapp.JAXBContextResolver as a provider class 2024-12-03T23:26:16,013 INFO [Thread-383 {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices as a root resource class 2024-12-03T23:26:16,013 INFO [Thread-383 {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-12-03T23:26:16,016 INFO [Thread-383 {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-12-03T23:26:16,082 INFO [Thread-383 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.resourcemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-03T23:26:16,542 INFO [Thread-383 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-03T23:26:17,073 INFO [Thread-383 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-03T23:26:17,115 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5b303091{cluster,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/java.io.tmpdir/jetty-localhost-38733-hadoop-yarn-common-3_4_1_jar-_-any-6016838626611644167/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/cluster} 2024-12-03T23:26:17,115 INFO [Thread-383 {}] handler.ContextHandler(921): Started 
o.e.j.w.WebAppContext@6ef8b464{jobhistory,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/java.io.tmpdir/jetty-localhost-40181-hadoop-yarn-common-3_4_1_jar-_-any-3875154175101011731/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/jobhistory} 2024-12-03T23:26:17,116 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2cc0d6f8{HTTP/1.1, (http/1.1)}{localhost:38733} 2024-12-03T23:26:17,116 INFO [Time-limited test {}] server.Server(415): Started @15399ms 2024-12-03T23:26:17,119 INFO [Thread-383 {}] server.AbstractConnector(333): Started ServerConnector@1404e5df{HTTP/1.1, (http/1.1)}{localhost:40181} 2024-12-03T23:26:17,119 INFO [Thread-383 {}] server.Server(415): Started @15402ms 2024-12-03T23:26:17,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741841_1017 (size=5) 2024-12-03T23:26:17,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741841_1017 (size=5) 2024-12-03T23:26:17,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741841_1017 (size=5) 2024-12-03T23:26:18,171 WARN [Time-limited test {}] tracker.NMLogAggregationStatusTracker(95): Log Aggregation is disabled.So is the LogAggregationStatusTracker. 2024-12-03T23:26:18,178 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T23:26:18,242 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-12-03T23:26:18,243 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T23:26:18,256 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T23:26:18,256 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T23:26:18,256 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-03T23:26:18,258 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T23:26:18,261 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5c129cb7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/hadoop.log.dir/,AVAILABLE} 2024-12-03T23:26:18,262 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2727aaa7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-03T23:26:18,368 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class 2024-12-03T23:26:18,369 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-12-03T23:26:18,369 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class 2024-12-03T23:26:18,369 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-12-03T23:26:18,386 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-03T23:26:18,420 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-03T23:26:18,584 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-03T23:26:18,598 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5c6df60a{node,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/java.io.tmpdir/jetty-localhost-41787-hadoop-yarn-common-3_4_1_jar-_-any-9750455233628008832/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-03T23:26:18,598 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@afb0898{HTTP/1.1, (http/1.1)}{localhost:41787} 2024-12-03T23:26:18,599 INFO [Time-limited test {}] server.Server(415): Started @16882ms 2024-12-03T23:26:18,707 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-03T23:26:18,791 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-03T23:26:18,794 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:acl' 2024-12-03T23:26:18,863 WARN [Time-limited test {}] tracker.NMLogAggregationStatusTracker(95): Log Aggregation is 
disabled.So is the LogAggregationStatusTracker. 2024-12-03T23:26:18,866 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T23:26:18,877 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-12-03T23:26:18,878 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T23:26:18,887 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T23:26:18,887 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T23:26:18,887 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-03T23:26:18,888 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T23:26:18,889 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7a3730f8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/hadoop.log.dir/,AVAILABLE} 2024-12-03T23:26:18,889 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2d33fe8e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-03T23:26:18,950 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class 2024-12-03T23:26:18,950 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-12-03T23:26:18,950 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class 2024-12-03T23:26:18,950 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-12-03T23:26:18,977 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-03T23:26:18,992 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-03T23:26:19,138 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 
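The Jetty/Jersey contexts in the entries that follow are the last NodeManager pieces; immediately after them the harness logs "Mini mapreduce cluster started". A minimal sketch of the sequence a test typically uses to stand up this environment, assuming the 2.x-style HBaseTestingUtility API (the HBaseTestingUtil class named in the log is its renamed successor with an equivalent surface):

    import org.apache.hadoop.hbase.HBaseTestingUtility;

    public class MiniClusterSetup {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtility util = new HBaseTestingUtility();
        // Three slaves, matching the three datanode replicas (127.0.0.1:45743/:37061/:39281)
        // and the three region servers (server 0/1/2) seen in this log.
        util.startMiniCluster(3);
        // Brings up the YARN ResourceManager/NodeManagers and the JobHistory server,
        // i.e. the "cluster", "node" and "jobhistory" Jetty webapps logged here.
        util.startMiniMapReduceCluster();
        try {
          // ... run MapReduce-based export tests against util.getConfiguration() ...
        } finally {
          util.shutdownMiniMapReduceCluster();
          util.shutdownMiniCluster();
        }
      }
    }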
2024-12-03T23:26:19,148 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@700abb2{node,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/java.io.tmpdir/jetty-localhost-41547-hadoop-yarn-common-3_4_1_jar-_-any-8069748161306986101/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-03T23:26:19,149 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5f5c072{HTTP/1.1, (http/1.1)}{localhost:41547} 2024-12-03T23:26:19,149 INFO [Time-limited test {}] server.Server(415): Started @17432ms 2024-12-03T23:26:19,185 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2341): Mini mapreduce cluster started 2024-12-03T23:26:19,187 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [30,000] milli-secs(wait.for.ratio=[1]) 2024-12-03T23:26:19,219 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithSplitRegion Thread=717, OpenFileDescriptor=778, MaxFileDescriptor=1048576, SystemLoadAverage=220, ProcessCount=11, AvailableMemoryMB=4638 2024-12-03T23:26:19,221 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=717 is superior to 500 2024-12-03T23:26:19,228 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-03T23:26:19,233 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is ed8014902436,43753,1733268369485 2024-12-03T23:26:19,233 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@3c44998f 2024-12-03T23:26:19,234 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-03T23:26:19,238 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45970, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-03T23:26:19,241 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithSplitRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-03T23:26:19,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-03T23:26:19,248 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_PRE_OPERATION 2024-12-03T23:26:19,249 INFO 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithSplitRegion" procId is: 7 2024-12-03T23:26:19,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-03T23:26:19,251 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-03T23:26:19,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741842_1018 (size=458) 2024-12-03T23:26:19,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741842_1018 (size=458) 2024-12-03T23:26:19,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741842_1018 (size=458) 2024-12-03T23:26:19,280 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 0a946bfb1db0f3ada4bf11d8c357faa1, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,,1733268379240.0a946bfb1db0f3ada4bf11d8c357faa1.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithSplitRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b 2024-12-03T23:26:19,284 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => e9d01e47291deb97fed4270eaf373804, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,1,1733268379240.e9d01e47291deb97fed4270eaf373804.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithSplitRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b 2024-12-03T23:26:19,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741843_1019 (size=83) 2024-12-03T23:26:19,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741843_1019 (size=83) 2024-12-03T23:26:19,327 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741843_1019 (size=83) 2024-12-03T23:26:19,328 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSplitRegion,,1733268379240.0a946bfb1db0f3ada4bf11d8c357faa1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T23:26:19,328 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1722): Closing 0a946bfb1db0f3ada4bf11d8c357faa1, disabling compactions & flushes 2024-12-03T23:26:19,328 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSplitRegion,,1733268379240.0a946bfb1db0f3ada4bf11d8c357faa1. 2024-12-03T23:26:19,328 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSplitRegion,,1733268379240.0a946bfb1db0f3ada4bf11d8c357faa1. 2024-12-03T23:26:19,328 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSplitRegion,,1733268379240.0a946bfb1db0f3ada4bf11d8c357faa1. after waiting 0 ms 2024-12-03T23:26:19,328 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSplitRegion,,1733268379240.0a946bfb1db0f3ada4bf11d8c357faa1. 2024-12-03T23:26:19,328 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSplitRegion,,1733268379240.0a946bfb1db0f3ada4bf11d8c357faa1. 
2024-12-03T23:26:19,328 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1676): Region close journal for 0a946bfb1db0f3ada4bf11d8c357faa1: Waiting for close lock at 1733268379328Disabling compacts and flushes for region at 1733268379328Disabling writes for close at 1733268379328Writing region close event to WAL at 1733268379328Closed at 1733268379328 2024-12-03T23:26:19,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741844_1020 (size=83) 2024-12-03T23:26:19,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741844_1020 (size=83) 2024-12-03T23:26:19,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741844_1020 (size=83) 2024-12-03T23:26:19,341 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSplitRegion,1,1733268379240.e9d01e47291deb97fed4270eaf373804.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T23:26:19,341 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1722): Closing e9d01e47291deb97fed4270eaf373804, disabling compactions & flushes 2024-12-03T23:26:19,341 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSplitRegion,1,1733268379240.e9d01e47291deb97fed4270eaf373804. 2024-12-03T23:26:19,341 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSplitRegion,1,1733268379240.e9d01e47291deb97fed4270eaf373804. 2024-12-03T23:26:19,341 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSplitRegion,1,1733268379240.e9d01e47291deb97fed4270eaf373804. after waiting 0 ms 2024-12-03T23:26:19,341 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSplitRegion,1,1733268379240.e9d01e47291deb97fed4270eaf373804. 2024-12-03T23:26:19,342 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSplitRegion,1,1733268379240.e9d01e47291deb97fed4270eaf373804. 
2024-12-03T23:26:19,342 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1676): Region close journal for e9d01e47291deb97fed4270eaf373804: Waiting for close lock at 1733268379341Disabling compacts and flushes for region at 1733268379341Disabling writes for close at 1733268379341Writing region close event to WAL at 1733268379342 (+1 ms)Closed at 1733268379342 2024-12-03T23:26:19,344 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_ADD_TO_META 2024-12-03T23:26:19,345 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSplitRegion,,1733268379240.0a946bfb1db0f3ada4bf11d8c357faa1.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1733268379344"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733268379344"}]},"ts":"1733268379344"} 2024-12-03T23:26:19,345 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSplitRegion,1,1733268379240.e9d01e47291deb97fed4270eaf373804.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1733268379344"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733268379344"}]},"ts":"1733268379344"} 2024-12-03T23:26:19,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-03T23:26:19,395 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-03T23:26:19,398 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-03T23:26:19,399 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733268379398"}]},"ts":"1733268379398"} 2024-12-03T23:26:19,405 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSplitRegion, state=ENABLING in hbase:meta 2024-12-03T23:26:19,405 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {ed8014902436=0} racks are {/default-rack=0} 2024-12-03T23:26:19,408 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-03T23:26:19,408 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-03T23:26:19,408 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-03T23:26:19,408 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-03T23:26:19,409 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-03T23:26:19,409 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-03T23:26:19,409 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-03T23:26:19,409 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-03T23:26:19,409 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-03T23:26:19,409 DEBUG [PEWorker-2 {}] 
balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-03T23:26:19,409 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=0a946bfb1db0f3ada4bf11d8c357faa1, ASSIGN}, {pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=e9d01e47291deb97fed4270eaf373804, ASSIGN}] 2024-12-03T23:26:19,413 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=e9d01e47291deb97fed4270eaf373804, ASSIGN 2024-12-03T23:26:19,413 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=0a946bfb1db0f3ada4bf11d8c357faa1, ASSIGN 2024-12-03T23:26:19,415 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=e9d01e47291deb97fed4270eaf373804, ASSIGN; state=OFFLINE, location=ed8014902436,41121,1733268370525; forceNewPlan=false, retain=false 2024-12-03T23:26:19,415 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=0a946bfb1db0f3ada4bf11d8c357faa1, ASSIGN; state=OFFLINE, location=ed8014902436,37351,1733268370600; forceNewPlan=false, retain=false 2024-12-03T23:26:19,566 INFO [ed8014902436:43753 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
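The CREATE request logged above (HMaster$4(2454)) spells out the full table descriptor: a single MOB-enabled family 'cf' with MOB_THRESHOLD => '0', and a split point of '1' that produces the two regions being assigned here. A minimal sketch of issuing the same request through the HBase Admin API, assuming the standard hbase-client builders rather than whatever helper the test itself calls:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateSplitMobTable {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)                 // VERSIONS => '1'
              .setMobEnabled(true)               // IS_MOB => 'true'
              .setMobThreshold(0L)               // MOB_THRESHOLD => '0': every value goes to MOB files
              .setBloomFilterType(BloomType.ROW) // BLOOMFILTER => 'ROW'
              .build();
          TableDescriptor td = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion"))
              .setColumnFamily(cf)
              .build();
          // A single split key of '1' yields the two regions assigned in this log:
          // one covering ''..'1' and one covering '1'..''.
          admin.createTable(td, new byte[][] { Bytes.toBytes("1") });
        }
      }
    }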
2024-12-03T23:26:19,567 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=0a946bfb1db0f3ada4bf11d8c357faa1, regionState=OPENING, regionLocation=ed8014902436,37351,1733268370600 2024-12-03T23:26:19,567 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=e9d01e47291deb97fed4270eaf373804, regionState=OPENING, regionLocation=ed8014902436,41121,1733268370525 2024-12-03T23:26:19,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-03T23:26:19,572 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=0a946bfb1db0f3ada4bf11d8c357faa1, ASSIGN because future has completed 2024-12-03T23:26:19,574 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=8, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0a946bfb1db0f3ada4bf11d8c357faa1, server=ed8014902436,37351,1733268370600}] 2024-12-03T23:26:19,575 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=e9d01e47291deb97fed4270eaf373804, ASSIGN because future has completed 2024-12-03T23:26:19,580 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=11, ppid=9, state=RUNNABLE, hasLock=false; OpenRegionProcedure e9d01e47291deb97fed4270eaf373804, server=ed8014902436,41121,1733268370525}] 2024-12-03T23:26:19,740 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSplitRegion,,1733268379240.0a946bfb1db0f3ada4bf11d8c357faa1. 2024-12-03T23:26:19,742 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7752): Opening region: {ENCODED => 0a946bfb1db0f3ada4bf11d8c357faa1, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,,1733268379240.0a946bfb1db0f3ada4bf11d8c357faa1.', STARTKEY => '', ENDKEY => '1'} 2024-12-03T23:26:19,742 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSplitRegion,,1733268379240.0a946bfb1db0f3ada4bf11d8c357faa1. service=AccessControlService 2024-12-03T23:26:19,744 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-03T23:26:19,744 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSplitRegion 0a946bfb1db0f3ada4bf11d8c357faa1 2024-12-03T23:26:19,744 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSplitRegion,,1733268379240.0a946bfb1db0f3ada4bf11d8c357faa1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T23:26:19,745 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-03T23:26:19,744 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7794): checking encryption for 0a946bfb1db0f3ada4bf11d8c357faa1 2024-12-03T23:26:19,745 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7797): checking classloading for 0a946bfb1db0f3ada4bf11d8c357faa1 2024-12-03T23:26:19,749 INFO [StoreOpener-0a946bfb1db0f3ada4bf11d8c357faa1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 0a946bfb1db0f3ada4bf11d8c357faa1 2024-12-03T23:26:19,757 INFO [StoreOpener-0a946bfb1db0f3ada4bf11d8c357faa1-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0a946bfb1db0f3ada4bf11d8c357faa1 columnFamilyName cf 2024-12-03T23:26:19,763 DEBUG [StoreOpener-0a946bfb1db0f3ada4bf11d8c357faa1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:26:19,765 INFO [StoreOpener-0a946bfb1db0f3ada4bf11d8c357faa1-1 {}] regionserver.HStore(327): Store=0a946bfb1db0f3ada4bf11d8c357faa1/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T23:26:19,766 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52167, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-03T23:26:19,766 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1038): replaying wal for 0a946bfb1db0f3ada4bf11d8c357faa1 2024-12-03T23:26:19,768 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithSplitRegion/0a946bfb1db0f3ada4bf11d8c357faa1 2024-12-03T23:26:19,771 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithSplitRegion/0a946bfb1db0f3ada4bf11d8c357faa1 2024-12-03T23:26:19,772 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1048): stopping wal replay for 0a946bfb1db0f3ada4bf11d8c357faa1 2024-12-03T23:26:19,772 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1060): Cleaning up temporary data for 0a946bfb1db0f3ada4bf11d8c357faa1 2024-12-03T23:26:19,779 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1093): writing seq id for 0a946bfb1db0f3ada4bf11d8c357faa1 2024-12-03T23:26:19,780 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSplitRegion,1,1733268379240.e9d01e47291deb97fed4270eaf373804. 2024-12-03T23:26:19,781 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7752): Opening region: {ENCODED => e9d01e47291deb97fed4270eaf373804, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,1,1733268379240.e9d01e47291deb97fed4270eaf373804.', STARTKEY => '1', ENDKEY => ''} 2024-12-03T23:26:19,781 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSplitRegion,1,1733268379240.e9d01e47291deb97fed4270eaf373804. service=AccessControlService 2024-12-03T23:26:19,782 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-03T23:26:19,782 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSplitRegion e9d01e47291deb97fed4270eaf373804 2024-12-03T23:26:19,782 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSplitRegion,1,1733268379240.e9d01e47291deb97fed4270eaf373804.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T23:26:19,782 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7794): checking encryption for e9d01e47291deb97fed4270eaf373804 2024-12-03T23:26:19,783 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7797): checking classloading for e9d01e47291deb97fed4270eaf373804 2024-12-03T23:26:19,784 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithSplitRegion/0a946bfb1db0f3ada4bf11d8c357faa1/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T23:26:19,785 INFO [StoreOpener-e9d01e47291deb97fed4270eaf373804-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region e9d01e47291deb97fed4270eaf373804 2024-12-03T23:26:19,785 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1114): Opened 0a946bfb1db0f3ada4bf11d8c357faa1; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66413236, jitterRate=-0.010365664958953857}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-03T23:26:19,785 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 0a946bfb1db0f3ada4bf11d8c357faa1 2024-12-03T23:26:19,787 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1006): Region open journal for 0a946bfb1db0f3ada4bf11d8c357faa1: Running coprocessor pre-open hook at 1733268379745Writing region info on filesystem at 1733268379746 (+1 ms)Initializing all the Stores at 1733268379748 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733268379748Cleaning up temporary data from old regions at 1733268379772 (+24 ms)Running coprocessor post-open hooks at 1733268379785 (+13 ms)Region opened successfully at 1733268379787 (+2 ms) 2024-12-03T23:26:19,788 INFO [StoreOpener-e9d01e47291deb97fed4270eaf373804-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 
MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e9d01e47291deb97fed4270eaf373804 columnFamilyName cf 2024-12-03T23:26:19,789 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSplitRegion,,1733268379240.0a946bfb1db0f3ada4bf11d8c357faa1., pid=10, masterSystemTime=1733268379733 2024-12-03T23:26:19,790 DEBUG [StoreOpener-e9d01e47291deb97fed4270eaf373804-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:26:19,791 INFO [StoreOpener-e9d01e47291deb97fed4270eaf373804-1 {}] regionserver.HStore(327): Store=e9d01e47291deb97fed4270eaf373804/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T23:26:19,792 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1038): replaying wal for e9d01e47291deb97fed4270eaf373804 2024-12-03T23:26:19,795 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSplitRegion,,1733268379240.0a946bfb1db0f3ada4bf11d8c357faa1. 2024-12-03T23:26:19,795 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithSplitRegion/e9d01e47291deb97fed4270eaf373804 2024-12-03T23:26:19,796 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSplitRegion,,1733268379240.0a946bfb1db0f3ada4bf11d8c357faa1. 
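Once the second ASSIGN finishes in the entries that follow, both regions report OPEN with openSeqNum=2. A small sketch of how a client could confirm that layout after the fact, assuming the stock Admin API (the log does not show the test performing this check):

    import java.util.List;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionInfo;
    import org.apache.hadoop.hbase.util.Bytes;

    public class VerifyRegionLayout {
      public static void main(String[] args) throws Exception {
        TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          List<RegionInfo> regions = admin.getRegions(tn);
          for (RegionInfo ri : regions) {
            // Expect two regions: startKey '' -> '1' and '1' -> '', as created above.
            System.out.println(ri.getEncodedName() + " ["
                + Bytes.toStringBinary(ri.getStartKey()) + ", "
                + Bytes.toStringBinary(ri.getEndKey()) + ")");
          }
        }
      }
    }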
2024-12-03T23:26:19,796 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=0a946bfb1db0f3ada4bf11d8c357faa1, regionState=OPEN, openSeqNum=2, regionLocation=ed8014902436,37351,1733268370600 2024-12-03T23:26:19,796 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithSplitRegion/e9d01e47291deb97fed4270eaf373804 2024-12-03T23:26:19,799 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1048): stopping wal replay for e9d01e47291deb97fed4270eaf373804 2024-12-03T23:26:19,799 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1060): Cleaning up temporary data for e9d01e47291deb97fed4270eaf373804 2024-12-03T23:26:19,799 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43753 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=OPEN, location=ed8014902436,37351,1733268370600, table=testtb-testExportFileSystemStateWithSplitRegion, region=0a946bfb1db0f3ada4bf11d8c357faa1. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-12-03T23:26:19,801 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=8, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0a946bfb1db0f3ada4bf11d8c357faa1, server=ed8014902436,37351,1733268370600 because future has completed 2024-12-03T23:26:19,805 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1093): writing seq id for e9d01e47291deb97fed4270eaf373804 2024-12-03T23:26:19,810 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=8 2024-12-03T23:26:19,810 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=8, state=SUCCESS, hasLock=false; OpenRegionProcedure 0a946bfb1db0f3ada4bf11d8c357faa1, server=ed8014902436,37351,1733268370600 in 230 msec 2024-12-03T23:26:19,813 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=0a946bfb1db0f3ada4bf11d8c357faa1, ASSIGN in 401 msec 2024-12-03T23:26:19,816 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithSplitRegion/e9d01e47291deb97fed4270eaf373804/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T23:26:19,817 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1114): Opened e9d01e47291deb97fed4270eaf373804; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72149807, jitterRate=0.07511590421199799}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-03T23:26:19,818 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1122): Running 
coprocessor post-open hooks for e9d01e47291deb97fed4270eaf373804 2024-12-03T23:26:19,818 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1006): Region open journal for e9d01e47291deb97fed4270eaf373804: Running coprocessor pre-open hook at 1733268379783Writing region info on filesystem at 1733268379783Initializing all the Stores at 1733268379785 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733268379785Cleaning up temporary data from old regions at 1733268379799 (+14 ms)Running coprocessor post-open hooks at 1733268379818 (+19 ms)Region opened successfully at 1733268379818 2024-12-03T23:26:19,820 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSplitRegion,1,1733268379240.e9d01e47291deb97fed4270eaf373804., pid=11, masterSystemTime=1733268379744 2024-12-03T23:26:19,823 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSplitRegion,1,1733268379240.e9d01e47291deb97fed4270eaf373804. 2024-12-03T23:26:19,823 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSplitRegion,1,1733268379240.e9d01e47291deb97fed4270eaf373804. 2024-12-03T23:26:19,824 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=e9d01e47291deb97fed4270eaf373804, regionState=OPEN, openSeqNum=2, regionLocation=ed8014902436,41121,1733268370525 2024-12-03T23:26:19,828 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43753 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=OPEN, location=ed8014902436,41121,1733268370525, table=testtb-testExportFileSystemStateWithSplitRegion, region=e9d01e47291deb97fed4270eaf373804. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 
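[editor's note] The entries above show both regions of the test table (0a946bfb1db0f3ada4bf11d8c357faa1 and e9d01e47291deb97fed4270eaf373804) being opened and recorded as OPEN in hbase:meta. A minimal client-side sketch of confirming that assignment, assuming a plain Connection against the same mini cluster (the test itself goes through HBaseTestingUtil helpers, so this is illustrative only):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class CheckAssignment {
      public static void main(String[] args) throws Exception {
        TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             RegionLocator locator = conn.getRegionLocator(tn)) {
          // Each location comes from hbase:meta, the same rows the PEWorkers just updated to OPEN.
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
          }
        }
      }
    }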
2024-12-03T23:26:19,829 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=9, state=RUNNABLE, hasLock=false; OpenRegionProcedure e9d01e47291deb97fed4270eaf373804, server=ed8014902436,41121,1733268370525 because future has completed 2024-12-03T23:26:19,837 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=9 2024-12-03T23:26:19,837 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=9, state=SUCCESS, hasLock=false; OpenRegionProcedure e9d01e47291deb97fed4270eaf373804, server=ed8014902436,41121,1733268370525 in 251 msec 2024-12-03T23:26:19,843 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=7 2024-12-03T23:26:19,843 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=e9d01e47291deb97fed4270eaf373804, ASSIGN in 428 msec 2024-12-03T23:26:19,845 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-03T23:26:19,846 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733268379846"}]},"ts":"1733268379846"} 2024-12-03T23:26:19,851 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSplitRegion, state=ENABLED in hbase:meta 2024-12-03T23:26:19,854 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_POST_OPERATION 2024-12-03T23:26:19,859 DEBUG [PEWorker-5 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithSplitRegion jenkins: RWXCA 2024-12-03T23:26:19,870 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1733268373530.94ece278d222af9bae70064026dc24ae., hostname=ed8014902436,37351,1733268370600, seqNum=2] 2024-12-03T23:26:19,875 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37351 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T23:26:19,875 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37351 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ed8014902436,37351,1733268370600, seqNum=-1] 2024-12-03T23:26:19,876 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37351 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T23:26:19,877 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53251, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=ClientService 2024-12-03T23:26:19,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 
{}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-03T23:26:19,880 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1733268373530.94ece278d222af9bae70064026dc24ae., hostname=ed8014902436,37351,1733268370600, seqNum=2] 2024-12-03T23:26:19,883 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37351 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-12-03T23:26:19,918 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-03T23:26:19,919 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase RegionObservers 2024-12-03T23:26:19,919 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-03T23:26:19,919 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-03T23:26:19,921 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-03T23:26:19,921 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase RegionServerObservers 2024-12-03T23:26:19,921 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-03T23:26:19,921 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-03T23:26:19,922 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSplitRegion 2024-12-03T23:26:19,922 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSplitRegion Metrics about Tables on a single HBase RegionServer 2024-12-03T23:26:19,924 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_acl 2024-12-03T23:26:19,924 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_acl Metrics about Tables on a single HBase RegionServer 2024-12-03T23:26:19,926 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: 
Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-03T23:26:19,926 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase MasterObservers 2024-12-03T23:26:19,926 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-12-03T23:26:19,927 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver Metrics about HBase MasterObservers 2024-12-03T23:26:19,927 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-03T23:26:19,927 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-03T23:26:19,968 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-03T23:26:19,968 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-03T23:26:19,968 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-03T23:26:19,968 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-03T23:26:19,968 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T23:26:19,968 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T23:26:19,968 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T23:26:19,968 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T23:26:19,984 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, 
path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-03T23:26:19,984 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-03T23:26:19,985 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-03T23:26:19,985 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-03T23:26:19,986 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-03T23:26:19,986 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-03T23:26:19,988 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion in 744 msec 2024-12-03T23:26:20,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-03T23:26:20,391 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-12-03T23:26:20,393 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-03T23:26:20,400 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithSplitRegion 2024-12-03T23:26:20,400 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithSplitRegion,,1733268379240.0a946bfb1db0f3ada4bf11d8c357faa1. 
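[editor's note] At this point CreateTableProcedure pid=7 has finished and the test utility finds two regions, so the table was created pre-split into [,1) and [1,), with an ACL entry 'jenkins: RWXCA' written to hbase:acl and pushed to every watcher through ZooKeeper. A minimal sketch of creating such a pre-split table through the public Admin API, assuming defaults for everything not shown (the MOB, bloom filter and block size options from the descriptor above are omitted):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreatePreSplitTable {
      public static void main(String[] args) throws Exception {
        TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // One explicit split key yields the two regions seen in the log: [,1) and [1,).
          byte[][] splitKeys = new byte[][] { Bytes.toBytes("1") };
          admin.createTable(TableDescriptorBuilder.newBuilder(tn)
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
              .build(), splitKeys);
        }
      }
    }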
2024-12-03T23:26:20,401 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-03T23:26:20,404 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-03T23:26:20,420 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-03T23:26:20,433 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T23:26:20,435 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57638, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T23:26:20,437 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-03T23:26:20,448 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-03T23:26:20,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733268380448 (current time:1733268380448). 
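[editor's note] The master has just accepted a snapshot request { ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } for the still-empty table. On the client side this corresponds to a call along the lines of the sketch below; the three-argument Admin.snapshot overload with an explicit SnapshotType is assumed here, and the two-argument form (which defaults to FLUSH) would behave the same way:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class TakeEmptySnapshot {
      public static void main(String[] args) throws Exception {
        TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Blocks until the master-side SnapshotProcedure (pid=12 in this run) reports completion.
          admin.snapshot("emptySnaptb0-testExportFileSystemStateWithSplitRegion", tn, SnapshotType.FLUSH);
        }
      }
    }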
2024-12-03T23:26:20,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-03T23:26:20,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemStateWithSplitRegion VERSION not specified, setting to 2 2024-12-03T23:26:20,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-03T23:26:20,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@34282cc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:26:20,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.ClusterIdFetcher(90): Going to request ed8014902436,43753,-1 for getting cluster id 2024-12-03T23:26:20,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T23:26:20,451 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '365bf15c-9cbe-4838-a767-ac7274dcdbe4' 2024-12-03T23:26:20,452 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T23:26:20,452 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "365bf15c-9cbe-4838-a767-ac7274dcdbe4" 2024-12-03T23:26:20,452 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3ce5f9d8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:26:20,452 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [ed8014902436,43753,-1] 2024-12-03T23:26:20,452 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T23:26:20,453 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:26:20,454 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46000, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T23:26:20,455 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@49f963e4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:26:20,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T23:26:20,457 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ed8014902436,37351,1733268370600, seqNum=-1] 2024-12-03T23:26:20,458 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T23:26:20,459 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51886, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T23:26:20,462 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753. 2024-12-03T23:26:20,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-03T23:26:20,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:26:20,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:26:20,468 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-03T23:26:20,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@490efc81, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:26:20,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.ClusterIdFetcher(90): Going to request ed8014902436,43753,-1 for getting cluster id 2024-12-03T23:26:20,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T23:26:20,471 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '365bf15c-9cbe-4838-a767-ac7274dcdbe4' 2024-12-03T23:26:20,471 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T23:26:20,471 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "365bf15c-9cbe-4838-a767-ac7274dcdbe4" 2024-12-03T23:26:20,471 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c4233b7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:26:20,471 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [ed8014902436,43753,-1] 2024-12-03T23:26:20,472 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T23:26:20,472 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:26:20,473 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46022, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T23:26:20,474 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@39917dde, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:26:20,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T23:26:20,475 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ed8014902436,37351,1733268370600, seqNum=-1] 2024-12-03T23:26:20,476 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T23:26:20,477 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51898, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-03T23:26:20,479 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1733268373530.94ece278d222af9bae70064026dc24ae., hostname=ed8014902436,37351,1733268370600, seqNum=2] 2024-12-03T23:26:20,481 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753. 2024-12-03T23:26:20,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-03T23:26:20,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:26:20,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:26:20,482 INFO [Registry-endpoints-refresh-end-points 
{}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-03T23:26:20,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-12-03T23:26:20,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-03T23:26:20,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure2.ProcedureExecutor(1139): Stored pid=12, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-03T23:26:20,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 12 2024-12-03T23:26:20,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-12-03T23:26:20,494 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-03T23:26:20,499 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-03T23:26:20,511 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-03T23:26:20,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741845_1021 (size=215) 2024-12-03T23:26:20,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741845_1021 (size=215) 2024-12-03T23:26:20,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741845_1021 (size=215) 2024-12-03T23:26:20,525 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-03T23:26:20,528 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=12, 
state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0a946bfb1db0f3ada4bf11d8c357faa1}, {pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e9d01e47291deb97fed4270eaf373804}] 2024-12-03T23:26:20,532 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e9d01e47291deb97fed4270eaf373804 2024-12-03T23:26:20,532 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0a946bfb1db0f3ada4bf11d8c357faa1 2024-12-03T23:26:20,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-12-03T23:26:20,688 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41121 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=14 2024-12-03T23:26:20,688 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37351 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=13 2024-12-03T23:26:20,689 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,,1733268379240.0a946bfb1db0f3ada4bf11d8c357faa1. 2024-12-03T23:26:20,689 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,1,1733268379240.e9d01e47291deb97fed4270eaf373804. 2024-12-03T23:26:20,694 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.HRegion(2603): Flush status journal for 0a946bfb1db0f3ada4bf11d8c357faa1: 2024-12-03T23:26:20,694 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for e9d01e47291deb97fed4270eaf373804: 2024-12-03T23:26:20,694 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSplitRegion,,1733268379240.0a946bfb1db0f3ada4bf11d8c357faa1. for emptySnaptb0-testExportFileSystemStateWithSplitRegion completed. 2024-12-03T23:26:20,694 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSplitRegion,1,1733268379240.e9d01e47291deb97fed4270eaf373804. for emptySnaptb0-testExportFileSystemStateWithSplitRegion completed. 2024-12-03T23:26:20,695 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSplitRegion,,1733268379240.0a946bfb1db0f3ada4bf11d8c357faa1.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-03T23:26:20,695 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSplitRegion,1,1733268379240.e9d01e47291deb97fed4270eaf373804.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-03T23:26:20,697 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-03T23:26:20,697 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-03T23:26:20,699 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-03T23:26:20,699 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-03T23:26:20,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741846_1022 (size=86) 2024-12-03T23:26:20,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741846_1022 (size=86) 2024-12-03T23:26:20,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741846_1022 (size=86) 2024-12-03T23:26:20,718 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,,1733268379240.0a946bfb1db0f3ada4bf11d8c357faa1. 
2024-12-03T23:26:20,720 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-12-03T23:26:20,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741847_1023 (size=86) 2024-12-03T23:26:20,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.HMaster(4169): Remote procedure done, pid=13 2024-12-03T23:26:20,722 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSplitRegion on region 0a946bfb1db0f3ada4bf11d8c357faa1 2024-12-03T23:26:20,723 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0a946bfb1db0f3ada4bf11d8c357faa1 2024-12-03T23:26:20,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741847_1023 (size=86) 2024-12-03T23:26:20,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741847_1023 (size=86) 2024-12-03T23:26:20,727 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=12, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 0a946bfb1db0f3ada4bf11d8c357faa1 in 198 msec 2024-12-03T23:26:20,728 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,1,1733268379240.e9d01e47291deb97fed4270eaf373804. 
2024-12-03T23:26:20,728 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-12-03T23:26:20,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-12-03T23:26:20,729 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSplitRegion on region e9d01e47291deb97fed4270eaf373804 2024-12-03T23:26:20,729 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e9d01e47291deb97fed4270eaf373804 2024-12-03T23:26:20,736 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=12 2024-12-03T23:26:20,736 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=12, state=SUCCESS, hasLock=false; SnapshotRegionProcedure e9d01e47291deb97fed4270eaf373804 in 204 msec 2024-12-03T23:26:20,736 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-03T23:26:20,739 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-03T23:26:20,741 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-03T23:26:20,742 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-03T23:26:20,742 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:26:20,743 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-03T23:26:20,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741848_1024 (size=78) 2024-12-03T23:26:20,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741848_1024 (size=78) 2024-12-03T23:26:20,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741848_1024 (size=78) 2024-12-03T23:26:20,760 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-03T23:26:20,760 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-03T23:26:20,763 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-03T23:26:20,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741849_1025 (size=713) 2024-12-03T23:26:20,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741849_1025 (size=713) 2024-12-03T23:26:20,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741849_1025 (size=713) 2024-12-03T23:26:20,785 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-03T23:26:20,798 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-03T23:26:20,799 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSplitRegion to 
hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-03T23:26:20,803 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-03T23:26:20,803 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 12 2024-12-03T23:26:20,806 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } in 314 msec 2024-12-03T23:26:20,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-12-03T23:26:20,810 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-12-03T23:26:20,828 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37351 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSplitRegion,,1733268379240.0a946bfb1db0f3ada4bf11d8c357faa1. with WAL disabled. Data may be lost in the event of a crash. 2024-12-03T23:26:20,831 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41121 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSplitRegion,1,1733268379240.e9d01e47291deb97fed4270eaf373804. with WAL disabled. Data may be lost in the event of a crash. 2024-12-03T23:26:20,834 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-03T23:26:20,838 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithSplitRegion 2024-12-03T23:26:20,838 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithSplitRegion,,1733268379240.0a946bfb1db0f3ada4bf11d8c357faa1. 
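[editor's note] With the empty snapshot finished, the region servers now log "writing data to region ... with WAL disabled. Data may be lost in the event of a crash." for both regions. That warning is what HRegion emits when mutations arrive with SKIP_WAL durability; a hedged sketch of a put that would produce it (row key, qualifier and value are illustrative only, not taken from the test):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class LoadWithoutWal {
      public static void main(String[] args) throws Exception {
        TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(tn)) {
          Put put = new Put(Bytes.toBytes("row-0"))   // hypothetical row key
              .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
          put.setDurability(Durability.SKIP_WAL);     // skips the WAL, hence the warning above
          table.put(put);
        }
      }
    }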
2024-12-03T23:26:20,838 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-03T23:26:20,841 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-03T23:26:20,848 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-03T23:26:20,859 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-03T23:26:20,864 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-03T23:26:20,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733268380864 (current time:1733268380864). 2024-12-03T23:26:20,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-03T23:26:20,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithSplitRegion VERSION not specified, setting to 2 2024-12-03T23:26:20,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-03T23:26:20,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4e3ccb0b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:26:20,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.ClusterIdFetcher(90): Going to request ed8014902436,43753,-1 for getting cluster id 2024-12-03T23:26:20,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T23:26:20,867 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '365bf15c-9cbe-4838-a767-ac7274dcdbe4' 2024-12-03T23:26:20,867 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T23:26:20,867 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "365bf15c-9cbe-4838-a767-ac7274dcdbe4" 2024-12-03T23:26:20,868 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@517a707, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:26:20,868 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [ed8014902436,43753,-1] 2024-12-03T23:26:20,868 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T23:26:20,869 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:26:20,870 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46040, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T23:26:20,871 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@197af562, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:26:20,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T23:26:20,873 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ed8014902436,37351,1733268370600, seqNum=-1] 2024-12-03T23:26:20,874 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T23:26:20,875 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51900, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T23:26:20,877 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753. 
2024-12-03T23:26:20,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-03T23:26:20,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:26:20,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:26:20,878 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-03T23:26:20,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1d1bc163, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:26:20,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.ClusterIdFetcher(90): Going to request ed8014902436,43753,-1 for getting cluster id 2024-12-03T23:26:20,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T23:26:20,880 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '365bf15c-9cbe-4838-a767-ac7274dcdbe4' 2024-12-03T23:26:20,880 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T23:26:20,880 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "365bf15c-9cbe-4838-a767-ac7274dcdbe4" 2024-12-03T23:26:20,881 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d3f85a2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:26:20,881 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [ed8014902436,43753,-1] 2024-12-03T23:26:20,881 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T23:26:20,881 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:26:20,882 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46064, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T23:26:20,883 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@aa185f7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:26:20,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T23:26:20,885 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ed8014902436,37351,1733268370600, seqNum=-1] 2024-12-03T23:26:20,886 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T23:26:20,888 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51910, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T23:26:20,891 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1733268373530.94ece278d222af9bae70064026dc24ae., hostname=ed8014902436,37351,1733268370600, seqNum=2] 2024-12-03T23:26:20,894 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753. 
2024-12-03T23:26:20,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-03T23:26:20,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:26:20,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:26:20,895 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-03T23:26:20,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-12-03T23:26:20,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
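The handler has finished validating the snapshot request (the ACL read above shows jenkins: RWXCA on the table) and is about to store a FLUSH-type SnapshotProcedure for snaptb0-testExportFileSystemStateWithSplitRegion. For context, the usual client-side entry point that produces this kind of server trace is Admin.snapshot; the sketch below is illustrative only and assumes a reachable cluster configured via hbase-site.xml, not the mini-cluster harness driving this test.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class TakeFlushSnapshot {
      public static void main(String[] args) throws Exception {
        // Assumes hbase-site.xml on the classpath points at a running cluster.
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // A FLUSH snapshot flushes each region's memstore and then references the
          // resulting hfiles, matching the SNAPSHOT_SNAPSHOT_ONLINE_REGIONS flush activity below.
          admin.snapshot("snaptb0-testExportFileSystemStateWithSplitRegion",
              TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion"),
              SnapshotType.FLUSH);
        }
      }
    }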
2024-12-03T23:26:20,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure2.ProcedureExecutor(1139): Stored pid=15, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-03T23:26:20,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 15 2024-12-03T23:26:20,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-12-03T23:26:20,909 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-03T23:26:20,912 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-03T23:26:20,916 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-03T23:26:20,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741850_1026 (size=210) 2024-12-03T23:26:20,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741850_1026 (size=210) 2024-12-03T23:26:20,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741850_1026 (size=210) 2024-12-03T23:26:20,942 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-03T23:26:20,942 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0a946bfb1db0f3ada4bf11d8c357faa1}, {pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e9d01e47291deb97fed4270eaf373804}] 2024-12-03T23:26:20,945 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e9d01e47291deb97fed4270eaf373804 2024-12-03T23:26:20,945 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for 
pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0a946bfb1db0f3ada4bf11d8c357faa1 2024-12-03T23:26:21,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-12-03T23:26:21,099 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37351 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=16 2024-12-03T23:26:21,099 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41121 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=17 2024-12-03T23:26:21,100 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,,1733268379240.0a946bfb1db0f3ada4bf11d8c357faa1. 2024-12-03T23:26:21,100 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,1,1733268379240.e9d01e47291deb97fed4270eaf373804. 2024-12-03T23:26:21,104 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(2902): Flushing e9d01e47291deb97fed4270eaf373804 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-12-03T23:26:21,105 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(2902): Flushing 0a946bfb1db0f3ada4bf11d8c357faa1 1/1 column families, dataSize=132 B heapSize=544 B 2024-12-03T23:26:21,175 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241203b5e8291b56ab4e09807f9ed2e82a0334_e9d01e47291deb97fed4270eaf373804 is 71, key is 1484bbbffee73ccdb611646ba3028e79/cf:q/1733268380831/Put/seqid=0 2024-12-03T23:26:21,175 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412036221d7bc2230479398f093bd58318b33_0a946bfb1db0f3ada4bf11d8c357faa1 is 71, key is 0d4172a2c2fd59783a6a52dcdb9506b2/cf:q/1733268380828/Put/seqid=0 2024-12-03T23:26:21,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-12-03T23:26:21,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741851_1027 (size=8241) 2024-12-03T23:26:21,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741851_1027 (size=8241) 2024-12-03T23:26:21,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741851_1027 (size=8241) 2024-12-03T23:26:21,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:37061 is added to blk_1073741852_1028 (size=5032) 2024-12-03T23:26:21,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741852_1028 (size=5032) 2024-12-03T23:26:21,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741852_1028 (size=5032) 2024-12-03T23:26:21,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-12-03T23:26:21,623 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:26:21,641 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:26:21,734 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412036221d7bc2230479398f093bd58318b33_0a946bfb1db0f3ada4bf11d8c357faa1 to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/d41d8cd98f00b204e9800998ecf8427e202412036221d7bc2230479398f093bd58318b33_0a946bfb1db0f3ada4bf11d8c357faa1 2024-12-03T23:26:21,734 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241203b5e8291b56ab4e09807f9ed2e82a0334_e9d01e47291deb97fed4270eaf373804 to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/c4ca4238a0b923820dcc509a6f75849b20241203b5e8291b56ab4e09807f9ed2e82a0334_e9d01e47291deb97fed4270eaf373804 2024-12-03T23:26:21,737 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithSplitRegion/0a946bfb1db0f3ada4bf11d8c357faa1/.tmp/cf/1f2ad2900ad74e5789a2b55f43d13c40, store: [table=testtb-testExportFileSystemStateWithSplitRegion family=cf region=0a946bfb1db0f3ada4bf11d8c357faa1] 2024-12-03T23:26:21,751 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithSplitRegion/e9d01e47291deb97fed4270eaf373804/.tmp/cf/715f1938f9d149deb99fb394b71c2aa9, store: [table=testtb-testExportFileSystemStateWithSplitRegion family=cf region=e9d01e47291deb97fed4270eaf373804] 
2024-12-03T23:26:21,756 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithSplitRegion/0a946bfb1db0f3ada4bf11d8c357faa1/.tmp/cf/1f2ad2900ad74e5789a2b55f43d13c40 is 224, key is 01035bfebab3fcc9414f2d05a6c9403f1/cf:q/1733268380828/Put/seqid=0 2024-12-03T23:26:21,757 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithSplitRegion/e9d01e47291deb97fed4270eaf373804/.tmp/cf/715f1938f9d149deb99fb394b71c2aa9 is 224, key is 1df072ad44bda793c0c4e9ac80a03aed2/cf:q/1733268380831/Put/seqid=0 2024-12-03T23:26:21,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741853_1029 (size=5754) 2024-12-03T23:26:21,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741853_1029 (size=5754) 2024-12-03T23:26:21,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741853_1029 (size=5754) 2024-12-03T23:26:21,809 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=132, hasBloomFilter=true, into tmp file hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithSplitRegion/0a946bfb1db0f3ada4bf11d8c357faa1/.tmp/cf/1f2ad2900ad74e5789a2b55f43d13c40 2024-12-03T23:26:21,825 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithSplitRegion/0a946bfb1db0f3ada4bf11d8c357faa1/.tmp/cf/1f2ad2900ad74e5789a2b55f43d13c40 as hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithSplitRegion/0a946bfb1db0f3ada4bf11d8c357faa1/cf/1f2ad2900ad74e5789a2b55f43d13c40 2024-12-03T23:26:21,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741854_1030 (size=15937) 2024-12-03T23:26:21,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741854_1030 (size=15937) 2024-12-03T23:26:21,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741854_1030 (size=15937) 2024-12-03T23:26:21,829 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithSplitRegion/e9d01e47291deb97fed4270eaf373804/.tmp/cf/715f1938f9d149deb99fb394b71c2aa9 
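These flushes write both a regular store file under .../data/default/... and a MOB file under .../mobdir/..., because the 'cf' family of this test table is MOB-enabled. As a rough illustration of how a family ends up MOB-enabled (the threshold value here is an assumed example, not read from this run):

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MobFamilyExample {
      // Cells in 'cf' larger than the MOB threshold are written to separate MOB files
      // (the mobdir/... paths above) instead of the regular store files.
      static ColumnFamilyDescriptor mobEnabledFamily() {
        return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
            .setMobEnabled(true)
            .setMobThreshold(100L)   // bytes; illustrative value only
            .build();
      }
    }

Such a descriptor would then be passed to a TableDescriptorBuilder and Admin.createTable, as in the table-creation sketch after the create request further down.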
2024-12-03T23:26:21,843 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithSplitRegion/e9d01e47291deb97fed4270eaf373804/.tmp/cf/715f1938f9d149deb99fb394b71c2aa9 as hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithSplitRegion/e9d01e47291deb97fed4270eaf373804/cf/715f1938f9d149deb99fb394b71c2aa9 2024-12-03T23:26:21,845 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithSplitRegion/0a946bfb1db0f3ada4bf11d8c357faa1/cf/1f2ad2900ad74e5789a2b55f43d13c40, entries=2, sequenceid=6, filesize=5.6 K 2024-12-03T23:26:21,857 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithSplitRegion/e9d01e47291deb97fed4270eaf373804/cf/715f1938f9d149deb99fb394b71c2aa9, entries=48, sequenceid=6, filesize=15.6 K 2024-12-03T23:26:21,859 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(3140): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for 0a946bfb1db0f3ada4bf11d8c357faa1 in 748ms, sequenceid=6, compaction requested=false 2024-12-03T23:26:21,860 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithSplitRegion' 2024-12-03T23:26:21,861 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(2603): Flush status journal for 0a946bfb1db0f3ada4bf11d8c357faa1: 2024-12-03T23:26:21,861 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(3140): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for e9d01e47291deb97fed4270eaf373804 in 758ms, sequenceid=6, compaction requested=false 2024-12-03T23:26:21,861 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSplitRegion,,1733268379240.0a946bfb1db0f3ada4bf11d8c357faa1. for snaptb0-testExportFileSystemStateWithSplitRegion completed. 2024-12-03T23:26:21,861 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(2603): Flush status journal for e9d01e47291deb97fed4270eaf373804: 2024-12-03T23:26:21,861 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSplitRegion,1,1733268379240.e9d01e47291deb97fed4270eaf373804. for snaptb0-testExportFileSystemStateWithSplitRegion completed. 
2024-12-03T23:26:21,861 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSplitRegion,1,1733268379240.e9d01e47291deb97fed4270eaf373804.' region-info for snapshot=snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-03T23:26:21,861 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSplitRegion,,1733268379240.0a946bfb1db0f3ada4bf11d8c357faa1.' region-info for snapshot=snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-03T23:26:21,861 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-03T23:26:21,861 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-03T23:26:21,861 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithSplitRegion/e9d01e47291deb97fed4270eaf373804/cf/715f1938f9d149deb99fb394b71c2aa9] hfiles 2024-12-03T23:26:21,861 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithSplitRegion/0a946bfb1db0f3ada4bf11d8c357faa1/cf/1f2ad2900ad74e5789a2b55f43d13c40] hfiles 2024-12-03T23:26:21,865 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithSplitRegion/0a946bfb1db0f3ada4bf11d8c357faa1/cf/1f2ad2900ad74e5789a2b55f43d13c40 for snapshot=snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-03T23:26:21,869 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithSplitRegion/e9d01e47291deb97fed4270eaf373804/cf/715f1938f9d149deb99fb394b71c2aa9 for snapshot=snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-03T23:26:21,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741856_1032 (size=125) 2024-12-03T23:26:21,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741856_1032 (size=125) 2024-12-03T23:26:21,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741855_1031 (size=125) 2024-12-03T23:26:21,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741856_1032 (size=125) 2024-12-03T23:26:21,887 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741855_1031 (size=125) 2024-12-03T23:26:21,887 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,1,1733268379240.e9d01e47291deb97fed4270eaf373804. 2024-12-03T23:26:21,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741855_1031 (size=125) 2024-12-03T23:26:21,887 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-12-03T23:26:21,888 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,,1733268379240.0a946bfb1db0f3ada4bf11d8c357faa1. 2024-12-03T23:26:21,888 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=16 2024-12-03T23:26:21,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.HMaster(4169): Remote procedure done, pid=17 2024-12-03T23:26:21,888 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSplitRegion on region e9d01e47291deb97fed4270eaf373804 2024-12-03T23:26:21,888 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e9d01e47291deb97fed4270eaf373804 2024-12-03T23:26:21,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.HMaster(4169): Remote procedure done, pid=16 2024-12-03T23:26:21,889 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSplitRegion on region 0a946bfb1db0f3ada4bf11d8c357faa1 2024-12-03T23:26:21,889 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0a946bfb1db0f3ada4bf11d8c357faa1 2024-12-03T23:26:21,894 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=17, ppid=15, state=SUCCESS, hasLock=false; SnapshotRegionProcedure e9d01e47291deb97fed4270eaf373804 in 948 msec 2024-12-03T23:26:21,897 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=16, resume processing ppid=15 2024-12-03T23:26:21,897 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-03T23:26:21,897 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=16, ppid=15, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 0a946bfb1db0f3ada4bf11d8c357faa1 in 950 msec 2024-12-03T23:26:21,898 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=15, 
state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-03T23:26:21,900 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 2024-12-03T23:26:21,900 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-03T23:26:21,900 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:26:21,903 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/c4ca4238a0b923820dcc509a6f75849b20241203b5e8291b56ab4e09807f9ed2e82a0334_e9d01e47291deb97fed4270eaf373804, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/d41d8cd98f00b204e9800998ecf8427e202412036221d7bc2230479398f093bd58318b33_0a946bfb1db0f3ada4bf11d8c357faa1] hfiles 2024-12-03T23:26:21,903 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/c4ca4238a0b923820dcc509a6f75849b20241203b5e8291b56ab4e09807f9ed2e82a0334_e9d01e47291deb97fed4270eaf373804 2024-12-03T23:26:21,903 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/d41d8cd98f00b204e9800998ecf8427e202412036221d7bc2230479398f093bd58318b33_0a946bfb1db0f3ada4bf11d8c357faa1 2024-12-03T23:26:21,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741857_1033 (size=309) 2024-12-03T23:26:21,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741857_1033 (size=309) 2024-12-03T23:26:21,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741857_1033 (size=309) 2024-12-03T23:26:21,914 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-03T23:26:21,914 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-03T23:26:21,915 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under 
directory:hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-03T23:26:21,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741858_1034 (size=1023) 2024-12-03T23:26:21,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741858_1034 (size=1023) 2024-12-03T23:26:21,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741858_1034 (size=1023) 2024-12-03T23:26:21,933 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-03T23:26:21,945 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-03T23:26:21,946 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSplitRegion to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-03T23:26:21,955 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-03T23:26:21,955 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 15 2024-12-03T23:26:21,962 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=15, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } in 1.0590 sec 2024-12-03T23:26:22,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-12-03T23:26:22,040 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-12-03T23:26:22,076 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-03T23:26:22,079 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, 
sasl=false 2024-12-03T23:26:22,081 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36198, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-03T23:26:22,082 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37351 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-12-03T23:26:22,091 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-03T23:26:22,101 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34718, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-03T23:26:22,102 INFO [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41121 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-12-03T23:26:22,120 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46400, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-03T23:26:22,120 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39085 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-12-03T23:26:22,171 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testExportFileSystemStateWithSplitRegion', {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-03T23:26:22,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] procedure2.ProcedureExecutor(1139): Stored pid=18, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-03T23:26:22,182 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_PRE_OPERATION 2024-12-03T23:26:22,183 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:26:22,183 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportFileSystemStateWithSplitRegion" procId is: 18 2024-12-03T23:26:22,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-12-03T23:26:22,187 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-03T23:26:22,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to 
blk_1073741859_1035 (size=390) 2024-12-03T23:26:22,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741859_1035 (size=390) 2024-12-03T23:26:22,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741859_1035 (size=390) 2024-12-03T23:26:22,264 INFO [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 54136d4ef8de85437ff664f333348f85, NAME => 'testExportFileSystemStateWithSplitRegion,,1733268382171.54136d4ef8de85437ff664f333348f85.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testExportFileSystemStateWithSplitRegion', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b 2024-12-03T23:26:22,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-12-03T23:26:22,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741860_1036 (size=75) 2024-12-03T23:26:22,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741860_1036 (size=75) 2024-12-03T23:26:22,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741860_1036 (size=75) 2024-12-03T23:26:22,374 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(898): Instantiated testExportFileSystemStateWithSplitRegion,,1733268382171.54136d4ef8de85437ff664f333348f85.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T23:26:22,375 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1722): Closing 54136d4ef8de85437ff664f333348f85, disabling compactions & flushes 2024-12-03T23:26:22,375 INFO [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1755): Closing region testExportFileSystemStateWithSplitRegion,,1733268382171.54136d4ef8de85437ff664f333348f85. 2024-12-03T23:26:22,375 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportFileSystemStateWithSplitRegion,,1733268382171.54136d4ef8de85437ff664f333348f85. 2024-12-03T23:26:22,375 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testExportFileSystemStateWithSplitRegion,,1733268382171.54136d4ef8de85437ff664f333348f85. after waiting 0 ms 2024-12-03T23:26:22,375 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testExportFileSystemStateWithSplitRegion,,1733268382171.54136d4ef8de85437ff664f333348f85. 
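The create request above ('testExportFileSystemStateWithSplitRegion', a single 'cf' family with default attributes) is tracked by CreateTableProcedure pid=18, whose region initialization is logged here. A minimal Java equivalent of issuing that request, again assuming a reachable cluster and an Admin handle as in the snapshot sketch earlier, would be roughly:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CreateTableExample {
      // Builds and creates a table with one 'cf' family using default attributes,
      // comparable to the descriptor logged by HMaster above.
      static void createTable(Admin admin) throws java.io.IOException {
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("testExportFileSystemStateWithSplitRegion"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
            .build();
        admin.createTable(td);   // blocks until the server-side create procedure completes
      }
    }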
2024-12-03T23:26:22,375 INFO [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1973): Closed testExportFileSystemStateWithSplitRegion,,1733268382171.54136d4ef8de85437ff664f333348f85. 2024-12-03T23:26:22,375 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1676): Region close journal for 54136d4ef8de85437ff664f333348f85: Waiting for close lock at 1733268382374Disabling compacts and flushes for region at 1733268382374Disabling writes for close at 1733268382375 (+1 ms)Writing region close event to WAL at 1733268382375Closed at 1733268382375 2024-12-03T23:26:22,379 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_ADD_TO_META 2024-12-03T23:26:22,379 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportFileSystemStateWithSplitRegion,,1733268382171.54136d4ef8de85437ff664f333348f85.","families":{"info":[{"qualifier":"regioninfo","vlen":74,"tag":[],"timestamp":"1733268382379"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733268382379"}]},"ts":"1733268382379"} 2024-12-03T23:26:22,385 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-03T23:26:22,387 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-03T23:26:22,389 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733268382389"}]},"ts":"1733268382389"} 2024-12-03T23:26:22,398 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportFileSystemStateWithSplitRegion, state=ENABLING in hbase:meta 2024-12-03T23:26:22,399 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {ed8014902436=0} racks are {/default-rack=0} 2024-12-03T23:26:22,400 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-03T23:26:22,401 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-03T23:26:22,402 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-03T23:26:22,402 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-03T23:26:22,402 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-03T23:26:22,402 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-03T23:26:22,402 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-03T23:26:22,402 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-03T23:26:22,402 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-03T23:26:22,402 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-03T23:26:22,402 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, 
region=54136d4ef8de85437ff664f333348f85, ASSIGN}] 2024-12-03T23:26:22,405 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=19, ppid=18, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=54136d4ef8de85437ff664f333348f85, ASSIGN 2024-12-03T23:26:22,407 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=19, ppid=18, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=54136d4ef8de85437ff664f333348f85, ASSIGN; state=OFFLINE, location=ed8014902436,41121,1733268370525; forceNewPlan=false, retain=false 2024-12-03T23:26:22,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-12-03T23:26:22,558 INFO [ed8014902436:43753 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-03T23:26:22,560 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=19 updating hbase:meta row=54136d4ef8de85437ff664f333348f85, regionState=OPENING, regionLocation=ed8014902436,41121,1733268370525 2024-12-03T23:26:22,564 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=19, ppid=18, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=54136d4ef8de85437ff664f333348f85, ASSIGN because future has completed 2024-12-03T23:26:22,571 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=20, ppid=19, state=RUNNABLE, hasLock=false; OpenRegionProcedure 54136d4ef8de85437ff664f333348f85, server=ed8014902436,41121,1733268370525}] 2024-12-03T23:26:22,732 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] handler.AssignRegionHandler(132): Open testExportFileSystemStateWithSplitRegion,,1733268382171.54136d4ef8de85437ff664f333348f85. 2024-12-03T23:26:22,732 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(7752): Opening region: {ENCODED => 54136d4ef8de85437ff664f333348f85, NAME => 'testExportFileSystemStateWithSplitRegion,,1733268382171.54136d4ef8de85437ff664f333348f85.', STARTKEY => '', ENDKEY => ''} 2024-12-03T23:26:22,733 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportFileSystemStateWithSplitRegion,,1733268382171.54136d4ef8de85437ff664f333348f85. service=AccessControlService 2024-12-03T23:26:22,733 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-03T23:26:22,733 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportFileSystemStateWithSplitRegion 54136d4ef8de85437ff664f333348f85 2024-12-03T23:26:22,733 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(898): Instantiated testExportFileSystemStateWithSplitRegion,,1733268382171.54136d4ef8de85437ff664f333348f85.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T23:26:22,734 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(7794): checking encryption for 54136d4ef8de85437ff664f333348f85 2024-12-03T23:26:22,734 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(7797): checking classloading for 54136d4ef8de85437ff664f333348f85 2024-12-03T23:26:22,736 INFO [StoreOpener-54136d4ef8de85437ff664f333348f85-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 54136d4ef8de85437ff664f333348f85 2024-12-03T23:26:22,738 INFO [StoreOpener-54136d4ef8de85437ff664f333348f85-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 54136d4ef8de85437ff664f333348f85 columnFamilyName cf 2024-12-03T23:26:22,738 DEBUG [StoreOpener-54136d4ef8de85437ff664f333348f85-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:26:22,738 INFO [StoreOpener-54136d4ef8de85437ff664f333348f85-1 {}] regionserver.HStore(327): Store=54136d4ef8de85437ff664f333348f85/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T23:26:22,739 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1038): replaying wal for 54136d4ef8de85437ff664f333348f85 2024-12-03T23:26:22,740 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportFileSystemStateWithSplitRegion/54136d4ef8de85437ff664f333348f85 2024-12-03T23:26:22,740 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportFileSystemStateWithSplitRegion/54136d4ef8de85437ff664f333348f85 2024-12-03T23:26:22,741 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1048): stopping wal replay for 54136d4ef8de85437ff664f333348f85 2024-12-03T23:26:22,741 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1060): Cleaning up temporary data for 54136d4ef8de85437ff664f333348f85 2024-12-03T23:26:22,742 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1093): writing seq id for 54136d4ef8de85437ff664f333348f85 2024-12-03T23:26:22,745 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportFileSystemStateWithSplitRegion/54136d4ef8de85437ff664f333348f85/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T23:26:22,746 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1114): Opened 54136d4ef8de85437ff664f333348f85; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69804921, jitterRate=0.04017437994480133}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-03T23:26:22,747 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 54136d4ef8de85437ff664f333348f85 2024-12-03T23:26:22,748 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1006): Region open journal for 54136d4ef8de85437ff664f333348f85: Running coprocessor pre-open hook at 1733268382734Writing region info on filesystem at 1733268382734Initializing all the Stores at 1733268382735 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733268382735Cleaning up temporary data from old regions at 1733268382741 (+6 ms)Running coprocessor post-open hooks at 1733268382747 (+6 ms)Region opened successfully at 1733268382748 (+1 ms) 2024-12-03T23:26:22,750 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportFileSystemStateWithSplitRegion,,1733268382171.54136d4ef8de85437ff664f333348f85., pid=20, masterSystemTime=1733268382725 2024-12-03T23:26:22,754 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportFileSystemStateWithSplitRegion,,1733268382171.54136d4ef8de85437ff664f333348f85. 2024-12-03T23:26:22,754 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] handler.AssignRegionHandler(153): Opened testExportFileSystemStateWithSplitRegion,,1733268382171.54136d4ef8de85437ff664f333348f85. 
2024-12-03T23:26:22,755 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=19 updating hbase:meta row=54136d4ef8de85437ff664f333348f85, regionState=OPEN, openSeqNum=2, regionLocation=ed8014902436,41121,1733268370525 2024-12-03T23:26:22,762 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=20, ppid=19, state=RUNNABLE, hasLock=false; OpenRegionProcedure 54136d4ef8de85437ff664f333348f85, server=ed8014902436,41121,1733268370525 because future has completed 2024-12-03T23:26:22,770 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=20, resume processing ppid=19 2024-12-03T23:26:22,770 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=20, ppid=19, state=SUCCESS, hasLock=false; OpenRegionProcedure 54136d4ef8de85437ff664f333348f85, server=ed8014902436,41121,1733268370525 in 193 msec 2024-12-03T23:26:22,781 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=19, resume processing ppid=18 2024-12-03T23:26:22,781 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=19, ppid=18, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=54136d4ef8de85437ff664f333348f85, ASSIGN in 368 msec 2024-12-03T23:26:22,783 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-03T23:26:22,783 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733268382783"}]},"ts":"1733268382783"} 2024-12-03T23:26:22,786 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportFileSystemStateWithSplitRegion, state=ENABLED in hbase:meta 2024-12-03T23:26:22,788 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_POST_OPERATION 2024-12-03T23:26:22,788 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testExportFileSystemStateWithSplitRegion jenkins: RWXCA 2024-12-03T23:26:22,793 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37351 {}] access.PermissionStorage(613): Read acl: entry[testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-12-03T23:26:22,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-12-03T23:26:22,851 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:26:22,851 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:26:22,852 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase/acl 2024-12-03T23:26:22,852 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:26:22,860 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF\x0AN\x0A\x07jenkins\x12C\x08\x03"?\x0A3\x0A\x07default\x12(testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-03T23:26:22,860 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF\x0AN\x0A\x07jenkins\x12C\x08\x03"?\x0A3\x0A\x07default\x12(testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-03T23:26:22,860 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF\x0AN\x0A\x07jenkins\x12C\x08\x03"?\x0A3\x0A\x07default\x12(testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-03T23:26:22,860 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF\x0AN\x0A\x07jenkins\x12C\x08\x03"?\x0A3\x0A\x07default\x12(testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-03T23:26:22,860 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-03T23:26:22,860 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-03T23:26:22,860 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-03T23:26:22,860 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-03T23:26:22,862 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=18, state=SUCCESS, hasLock=false; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion in 688 msec 2024-12-03T23:26:23,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-12-03T23:26:23,320 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-12-03T23:26:23,320 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do 
block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T23:26:23,323 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T23:26:25,250 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-03T23:26:25,430 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportFileSystemStateWithSplitRegion' 2024-12-03T23:26:26,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741861_1037 (size=134217728) 2024-12-03T23:26:26,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741861_1037 (size=134217728) 2024-12-03T23:26:26,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741861_1037 (size=134217728) 2024-12-03T23:26:28,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741862_1038 (size=134217728) 2024-12-03T23:26:28,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741862_1038 (size=134217728) 2024-12-03T23:26:28,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741862_1038 (size=134217728) 2024-12-03T23:26:29,649 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-919726820_22 at /127.0.0.1:46936 [Receiving block BP-1874256318-172.17.0.2-1733268364664:blk_1073741863_1039] {}] datanode.BlockReceiver(767): Slow BlockReceiver write data to disk cost: 880ms (threshold=300ms), volume=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data3/, blockId=1073741863, seqno=4427 2024-12-03T23:26:29,649 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-919726820_22 at /127.0.0.1:49930 [Receiving block BP-1874256318-172.17.0.2-1733268364664:blk_1073741863_1039] {}] datanode.BlockReceiver(464): Slow flushOrSync took 881ms (threshold=300ms), isSync:false, flushTotalNanos=880856559ns, volume=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data5/, blockId=1073741863, seqno=4426 2024-12-03T23:26:29,918 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportFileSystemStateWithSplitRegion 2024-12-03T23:26:29,918 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportFileSystemStateWithSplitRegion Metrics about Tables on a single HBase RegionServer 2024-12-03T23:26:29,961 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/output/cf/test_file is 35, key is 1\x00\x00\x00/cf:q/1733268383329/Put/seqid=0 2024-12-03T23:26:29,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741863_1039 (size=51979256) 2024-12-03T23:26:29,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741863_1039 (size=51979256) 2024-12-03T23:26:29,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741863_1039 (size=51979256) 2024-12-03T23:26:29,972 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1159d3f5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:26:29,972 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request ed8014902436,43753,-1 for getting cluster id 2024-12-03T23:26:29,973 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T23:26:29,977 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '365bf15c-9cbe-4838-a767-ac7274dcdbe4' 2024-12-03T23:26:29,978 DEBUG [RPCClient-NioEventLoopGroup-6-9 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T23:26:29,978 DEBUG [RPCClient-NioEventLoopGroup-6-9 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "365bf15c-9cbe-4838-a767-ac7274dcdbe4" 2024-12-03T23:26:29,979 DEBUG [RPCClient-NioEventLoopGroup-6-9 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@74757cd0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:26:29,979 DEBUG [RPCClient-NioEventLoopGroup-6-9 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [ed8014902436,43753,-1] 2024-12-03T23:26:29,979 DEBUG [RPCClient-NioEventLoopGroup-6-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T23:26:29,980 DEBUG [RPCClient-NioEventLoopGroup-6-9 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:26:29,981 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48570, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T23:26:29,982 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@33698454, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:26:29,983 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T23:26:29,984 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, 
hostname=ed8014902436,37351,1733268370600, seqNum=-1] 2024-12-03T23:26:29,985 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T23:26:29,986 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36202, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T23:26:29,998 WARN [Time-limited test {}] tool.BulkLoadHFilesTool$1(330): Trying to bulk load hfile hdfs://localhost:37331/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/output/cf/test_file with size: 320414712 bytes can be problematic as it may lead to oversplitting. 2024-12-03T23:26:29,999 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-03T23:26:30,000 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] client.AsyncConnectionImpl(321): The fetched master address is ed8014902436,43753,1733268369485 2024-12-03T23:26:30,000 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@62e4038d 2024-12-03T23:26:30,000 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-03T23:26:30,002 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48578, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-03T23:26:30,009 WARN [IPC Server handler 1 on default port 37331 {}] namenode.FSNamesystem(6314): trying to get DT with no secret manager running 2024-12-03T23:26:30,017 DEBUG [RPCClient-NioEventLoopGroup-6-11 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportFileSystemStateWithSplitRegion', row='', locateType=CURRENT is [region=testExportFileSystemStateWithSplitRegion,,1733268382171.54136d4ef8de85437ff664f333348f85., hostname=ed8014902436,41121,1733268370525, seqNum=2] 2024-12-03T23:26:30,020 DEBUG [RPCClient-NioEventLoopGroup-6-11 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T23:26:30,023 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34734, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T23:26:30,029 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportFileSystemStateWithSplitRegion,, stopping at row=testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-03T23:26:30,069 INFO [BulkLoadHFilesTool-0 {}] tool.BulkLoadHFilesTool(704): Trying to load hfile=hdfs://localhost:37331/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/output/cf/test_file first=Optional[1\x00\x00\x00] last=Optional[9\x00\x00\x00] 2024-12-03T23:26:30,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41121 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T23:26:30,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41121 {}] client.ConnectionUtils(555): The fetched meta region location is 
[region=hbase:meta,,1.1588230740, hostname=ed8014902436,37351,1733268370600, seqNum=-1] 2024-12-03T23:26:30,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41121 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T23:26:30,106 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52723, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=ClientService 2024-12-03T23:26:30,108 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37351 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8304) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2441) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2415) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43516) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-03T23:26:30,112 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37351 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: ExecService size: 101 connection: 172.17.0.2:52723 deadline: 1733268450107, exception=org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 2024-12-03T23:26:30,114 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41121 {}] regionserver.SecureBulkLoadManager(227): unable to add token java.util.concurrent.ExecutionException: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8304) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2441) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2415) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43516) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:396) ~[?:?] at java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073) ~[?:?] at org.apache.hadoop.hbase.regionserver.SecureBulkLoadManager.secureBulkLoadHFiles(SecureBulkLoadManager.java:221) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.bulkLoadHFile(RSRpcServices.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43510) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] Caused by: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8304) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2441) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2415) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43516) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] 
at org.apache.hadoop.hbase.client.RegionCoprocessorRpcChannelImpl.lambda$rpcCall$0(RegionCoprocessorRpcChannelImpl.java:90) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollStreamChannel$EpollStreamUnsafe.epollInReady(AbstractEpollStreamChannel.java:799) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8304) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2441) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2415) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43516) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at 
org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollStreamChannel$EpollStreamUnsafe.epollInReady(AbstractEpollStreamChannel.java:799) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T23:26:30,117 WARN [IPC Server handler 1 on default port 37331 {}] namenode.FSNamesystem(6314): trying to get DT with no secret manager running 2024-12-03T23:26:30,162 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41121 {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:37331/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/output/cf/test_file for inclusion in 54136d4ef8de85437ff664f333348f85/cf 2024-12-03T23:26:30,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41121 {}] regionserver.HStore(626): HFile bounds: first=1\x00\x00\x00 last=9\x00\x00\x00 2024-12-03T23:26:30,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41121 {}] regionserver.HStore(628): Region bounds: first= last= 2024-12-03T23:26:30,177 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41121 {}] regionserver.HStore(641): Trying to bulk load hfile hdfs://localhost:37331/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/output/cf/test_file with size: 320414712 bytes can be problematic as it may lead to oversplitting. 2024-12-03T23:26:30,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41121 {}] regionserver.HRegion(2603): Flush status journal for 54136d4ef8de85437ff664f333348f85: 2024-12-03T23:26:30,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41121 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(397): Moving hdfs://localhost:37331/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/output/cf/test_file to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/staging/jenkins__testExportFileSystemStateWithSplitRegion__rkeus4g3i8os0f60ajb19jp01j9odaqk0mbl0tlppmrai2n5ekupoal1hhq6q8o3/cf/test_file 2024-12-03T23:26:30,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41121 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/staging/jenkins__testExportFileSystemStateWithSplitRegion__rkeus4g3i8os0f60ajb19jp01j9odaqk0mbl0tlppmrai2n5ekupoal1hhq6q8o3/cf/test_file as hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportFileSystemStateWithSplitRegion/54136d4ef8de85437ff664f333348f85/cf/463304943af74b0885f87a458aa43cfa_SeqId_4_ 2024-12-03T23:26:30,194 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41121 {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/staging/jenkins__testExportFileSystemStateWithSplitRegion__rkeus4g3i8os0f60ajb19jp01j9odaqk0mbl0tlppmrai2n5ekupoal1hhq6q8o3/cf/test_file into 54136d4ef8de85437ff664f333348f85/cf as 
hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportFileSystemStateWithSplitRegion/54136d4ef8de85437ff664f333348f85/cf/463304943af74b0885f87a458aa43cfa_SeqId_4_ - updating store file list. 2024-12-03T23:26:30,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41121 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 463304943af74b0885f87a458aa43cfa_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-03T23:26:30,212 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41121 {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportFileSystemStateWithSplitRegion/54136d4ef8de85437ff664f333348f85/cf/463304943af74b0885f87a458aa43cfa_SeqId_4_ into 54136d4ef8de85437ff664f333348f85/cf 2024-12-03T23:26:30,212 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41121 {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/staging/jenkins__testExportFileSystemStateWithSplitRegion__rkeus4g3i8os0f60ajb19jp01j9odaqk0mbl0tlppmrai2n5ekupoal1hhq6q8o3/cf/test_file into 54136d4ef8de85437ff664f333348f85/cf (new location: hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportFileSystemStateWithSplitRegion/54136d4ef8de85437ff664f333348f85/cf/463304943af74b0885f87a458aa43cfa_SeqId_4_) 2024-12-03T23:26:30,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41121 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(412): Bulk Load done for: hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/staging/jenkins__testExportFileSystemStateWithSplitRegion__rkeus4g3i8os0f60ajb19jp01j9odaqk0mbl0tlppmrai2n5ekupoal1hhq6q8o3/cf/test_file 2024-12-03T23:26:30,224 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-03T23:26:30,224 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.tool.BulkLoadHFilesTool.bulkLoad(BulkLoadHFilesTool.java:1125) at org.apache.hadoop.hbase.tool.BulkLoadHFilesTool.run(BulkLoadHFilesTool.java:1176) at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportFileSystemStateWithSplitRegion(TestExportSnapshot.java:229) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T23:26:30,224 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:26:30,225 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:26:30,225 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-03T23:26:30,225 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportFileSystemStateWithSplitRegion,, stopping at row=testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-03T23:26:30,225 DEBUG [RPCClient-NioEventLoopGroup-6-13 {}] client.AsyncRegionLocatorHelper(64): Try updating region=testExportFileSystemStateWithSplitRegion,,1733268382171.54136d4ef8de85437ff664f333348f85., hostname=ed8014902436,41121,1733268370525, seqNum=2 , the old value is region=testExportFileSystemStateWithSplitRegion,,1733268382171.54136d4ef8de85437ff664f333348f85., hostname=ed8014902436,41121,1733268370525, seqNum=2, error=org.apache.hadoop.hbase.exceptions.ConnectionClosedException: Call to address=ed8014902436:41121 failed on local exception: org.apache.hadoop.hbase.exceptions.ConnectionClosedException: Connection closed 2024-12-03T23:26:30,226 DEBUG [RPCClient-NioEventLoopGroup-6-13 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=testExportFileSystemStateWithSplitRegion,,1733268382171.54136d4ef8de85437ff664f333348f85., hostname=ed8014902436,41121,1733268370525, seqNum=2 is org.apache.hadoop.hbase.exceptions.ConnectionClosedException: Connection closed 2024-12-03T23:26:30,226 DEBUG [RPCClient-NioEventLoopGroup-6-13 {}] client.AsyncRegionLocatorHelper(88): Try removing region=testExportFileSystemStateWithSplitRegion,,1733268382171.54136d4ef8de85437ff664f333348f85., hostname=ed8014902436,41121,1733268370525, seqNum=2 from cache 2024-12-03T23:26:30,228 WARN [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] util.NettyFutureUtils(65): IO operation failed org.apache.hbase.thirdparty.io.netty.channel.StacklessClosedChannelException: null at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AbstractUnsafe.write(Object, ChannelPromise)(Unknown Source) ~[hbase-shaded-netty-4.1.9.jar:?] 2024-12-03T23:26:30,234 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportFileSystemStateWithSplitRegion', row='5', locateType=CURRENT is [region=testExportFileSystemStateWithSplitRegion,,1733268382171.54136d4ef8de85437ff664f333348f85., hostname=ed8014902436,41121,1733268370525, seqNum=2] 2024-12-03T23:26:30,244 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.HMaster$3(2313): Client=jenkins//172.17.0.2 split testExportFileSystemStateWithSplitRegion,,1733268382171.54136d4ef8de85437ff664f333348f85. 
2024-12-03T23:26:30,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=ed8014902436,41121,1733268370525 2024-12-03T23:26:30,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] procedure2.ProcedureExecutor(1139): Stored pid=21, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=54136d4ef8de85437ff664f333348f85, daughterA=a63704f9ae4639fb39500e4e4c1c926b, daughterB=5876583ce76c99709588d8bf18d29689 2024-12-03T23:26:30,256 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=21, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=54136d4ef8de85437ff664f333348f85, daughterA=a63704f9ae4639fb39500e4e4c1c926b, daughterB=5876583ce76c99709588d8bf18d29689 2024-12-03T23:26:30,256 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=21, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=54136d4ef8de85437ff664f333348f85, daughterA=a63704f9ae4639fb39500e4e4c1c926b, daughterB=5876583ce76c99709588d8bf18d29689 2024-12-03T23:26:30,256 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=21, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=54136d4ef8de85437ff664f333348f85, daughterA=a63704f9ae4639fb39500e4e4c1c926b, daughterB=5876583ce76c99709588d8bf18d29689 2024-12-03T23:26:30,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-12-03T23:26:30,263 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=22, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=54136d4ef8de85437ff664f333348f85, UNASSIGN}] 2024-12-03T23:26:30,265 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=22, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=54136d4ef8de85437ff664f333348f85, UNASSIGN 2024-12-03T23:26:30,267 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=22 updating hbase:meta row=54136d4ef8de85437ff664f333348f85, regionState=CLOSING, regionLocation=ed8014902436,41121,1733268370525 2024-12-03T23:26:30,270 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=22, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=54136d4ef8de85437ff664f333348f85, UNASSIGN because future has completed 2024-12-03T23:26:30,271 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-03T23:26:30,271 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=23, ppid=22, state=RUNNABLE, hasLock=false; CloseRegionProcedure 54136d4ef8de85437ff664f333348f85, server=ed8014902436,41121,1733268370525}] 2024-12-03T23:26:30,331 WARN 
[Async-Client-Retry-Timer-pool-0 {}] client.AsyncNonMetaRegionLocator(265): Failed to locate region in 'testExportFileSystemStateWithSplitRegion', row='', locateType=CURRENT org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=ed8014902436:37351 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$RpcChannelImplementation.callMethod(AbstractRpcClient.java:628) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$Stub.scan(ClientProtos.java:43851) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.callOpenScanner(AsyncClientScanner.java:177) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.call(AsyncSingleRequestRpcRetryingCaller.java:84) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$doCall$7(AsyncSingleRequestRpcRetryingCaller.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture.uniWhenCompleteStage(CompletableFuture.java:887) ~[?:?] at java.util.concurrent.CompletableFuture.whenComplete(CompletableFuture.java:2325) ~[?:?] 
at org.apache.hadoop.hbase.util.FutureUtils.addListener(FutureUtils.java:64) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.doCall(AsyncSingleRequestRpcRetryingCaller.java:108) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.call(AsyncRpcRetryingCaller.java:222) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory$SingleRequestCallerBuilder.call(AsyncRpcRetryingCallerFactory.java:177) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.openScanner(AsyncClientScanner.java:242) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.timelineConsistentRead(ConnectionUtils.java:442) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.openScanner(AsyncClientScanner.java:255) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.start(AsyncClientScanner.java:275) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.scan(RawAsyncTableImpl.java:617) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.scan(RawAsyncTableImpl.java:91) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator.locateInMeta(AsyncNonMetaRegionLocator.java:408) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator.getRegionLocationsInternal(AsyncNonMetaRegionLocator.java:516) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator.getRegionLocations(AsyncNonMetaRegionLocator.java:529) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.lambda$getRegionLocation$7(AsyncRegionLocator.java:164) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.tracedLocationFuture(AsyncRegionLocator.java:106) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.getRegionLocation(AsyncRegionLocator.java:158) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.getRegionLocation(AsyncRegionLocator.java:193) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.doCall(AsyncSingleRequestRpcRetryingCaller.java:109) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.lambda$tryScheduleRetry$1(AsyncRpcRetryingCaller.java:139) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$HashedWheelTimeout.run(HashedWheelTimer.java:713) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.ImmediateExecutor.execute(ImmediateExecutor.java:34) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$HashedWheelTimeout.expire(HashedWheelTimer.java:701) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$HashedWheelBucket.expireTimeouts(HashedWheelTimer.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 34 more 2024-12-03T23:26:30,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-12-03T23:26:30,433 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(122): Close 54136d4ef8de85437ff664f333348f85 2024-12-03T23:26:30,434 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-12-03T23:26:30,435 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1722): Closing 54136d4ef8de85437ff664f333348f85, disabling compactions & flushes 2024-12-03T23:26:30,435 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1755): Closing region testExportFileSystemStateWithSplitRegion,,1733268382171.54136d4ef8de85437ff664f333348f85. 2024-12-03T23:26:30,435 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1776): Time limited wait for close lock on testExportFileSystemStateWithSplitRegion,,1733268382171.54136d4ef8de85437ff664f333348f85. 2024-12-03T23:26:30,435 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1843): Acquired close lock on testExportFileSystemStateWithSplitRegion,,1733268382171.54136d4ef8de85437ff664f333348f85. after waiting 0 ms 2024-12-03T23:26:30,435 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1853): Updates disabled for region testExportFileSystemStateWithSplitRegion,,1733268382171.54136d4ef8de85437ff664f333348f85. 
2024-12-03T23:26:30,455 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportFileSystemStateWithSplitRegion/54136d4ef8de85437ff664f333348f85/recovered.edits/6.seqid, newMaxSeqId=6, maxSeqId=1 2024-12-03T23:26:30,459 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-03T23:26:30,459 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1973): Closed testExportFileSystemStateWithSplitRegion,,1733268382171.54136d4ef8de85437ff664f333348f85. 2024-12-03T23:26:30,459 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1676): Region close journal for 54136d4ef8de85437ff664f333348f85: Waiting for close lock at 1733268390435Running coprocessor pre-close hooks at 1733268390435Disabling compacts and flushes for region at 1733268390435Disabling writes for close at 1733268390435Writing region close event to WAL at 1733268390448 (+13 ms)Running coprocessor post-close hooks at 1733268390456 (+8 ms)Closed at 1733268390459 (+3 ms) 2024-12-03T23:26:30,464 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(157): Closed 54136d4ef8de85437ff664f333348f85 2024-12-03T23:26:30,464 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=22 updating hbase:meta row=54136d4ef8de85437ff664f333348f85, regionState=CLOSED 2024-12-03T23:26:30,468 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=23, ppid=22, state=RUNNABLE, hasLock=false; CloseRegionProcedure 54136d4ef8de85437ff664f333348f85, server=ed8014902436,41121,1733268370525 because future has completed 2024-12-03T23:26:30,474 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=23, resume processing ppid=22 2024-12-03T23:26:30,475 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=23, ppid=22, state=SUCCESS, hasLock=false; CloseRegionProcedure 54136d4ef8de85437ff664f333348f85, server=ed8014902436,41121,1733268370525 in 199 msec 2024-12-03T23:26:30,478 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=22, resume processing ppid=21 2024-12-03T23:26:30,478 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=22, ppid=21, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=54136d4ef8de85437ff664f333348f85, UNASSIGN in 212 msec 2024-12-03T23:26:30,490 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:26:30,494 INFO [PEWorker-4 {}] assignment.SplitTableRegionProcedure(728): pid=21 splitting 1 storefiles, region=54136d4ef8de85437ff664f333348f85, threads=1 2024-12-03T23:26:30,499 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=21 splitting started for store file: 
hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportFileSystemStateWithSplitRegion/54136d4ef8de85437ff664f333348f85/cf/463304943af74b0885f87a458aa43cfa_SeqId_4_ for region: 54136d4ef8de85437ff664f333348f85 2024-12-03T23:26:30,514 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 463304943af74b0885f87a458aa43cfa_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-03T23:26:30,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741864_1040 (size=21) 2024-12-03T23:26:30,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741864_1040 (size=21) 2024-12-03T23:26:30,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741864_1040 (size=21) 2024-12-03T23:26:30,555 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 463304943af74b0885f87a458aa43cfa_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-03T23:26:30,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-12-03T23:26:30,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741865_1041 (size=21) 2024-12-03T23:26:30,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741865_1041 (size=21) 2024-12-03T23:26:30,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741865_1041 (size=21) 2024-12-03T23:26:30,591 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=21 splitting complete for store file: hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportFileSystemStateWithSplitRegion/54136d4ef8de85437ff664f333348f85/cf/463304943af74b0885f87a458aa43cfa_SeqId_4_ for region: 54136d4ef8de85437ff664f333348f85 2024-12-03T23:26:30,594 DEBUG [PEWorker-4 {}] assignment.SplitTableRegionProcedure(802): pid=21 split storefiles for region 54136d4ef8de85437ff664f333348f85 Daughter A: [hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportFileSystemStateWithSplitRegion/a63704f9ae4639fb39500e4e4c1c926b/cf/463304943af74b0885f87a458aa43cfa_SeqId_4_.54136d4ef8de85437ff664f333348f85] storefiles, Daughter B: [hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportFileSystemStateWithSplitRegion/5876583ce76c99709588d8bf18d29689/cf/463304943af74b0885f87a458aa43cfa_SeqId_4_.54136d4ef8de85437ff664f333348f85] storefiles. 
2024-12-03T23:26:30,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741866_1042 (size=76) 2024-12-03T23:26:30,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741866_1042 (size=76) 2024-12-03T23:26:30,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741866_1042 (size=76) 2024-12-03T23:26:30,618 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:26:30,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741867_1043 (size=76) 2024-12-03T23:26:30,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741867_1043 (size=76) 2024-12-03T23:26:30,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741867_1043 (size=76) 2024-12-03T23:26:30,640 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:26:30,649 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportFileSystemStateWithSplitRegion/a63704f9ae4639fb39500e4e4c1c926b/recovered.edits/6.seqid, newMaxSeqId=6, maxSeqId=-1 2024-12-03T23:26:30,651 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportFileSystemStateWithSplitRegion/5876583ce76c99709588d8bf18d29689/recovered.edits/6.seqid, newMaxSeqId=6, maxSeqId=-1 2024-12-03T23:26:30,653 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"testExportFileSystemStateWithSplitRegion,,1733268382171.54136d4ef8de85437ff664f333348f85.","families":{"info":[{"qualifier":"regioninfo","vlen":74,"tag":[],"timestamp":"1733268390653"},{"qualifier":"splitA","vlen":75,"tag":[],"timestamp":"1733268390653"},{"qualifier":"splitB","vlen":75,"tag":[],"timestamp":"1733268390653"}]},"ts":"1733268390653"} 2024-12-03T23:26:30,654 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"testExportFileSystemStateWithSplitRegion,,1733268390250.a63704f9ae4639fb39500e4e4c1c926b.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1733268390653"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733268390653"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733268390653"}]},"ts":"1733268390653"} 2024-12-03T23:26:30,654 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"testExportFileSystemStateWithSplitRegion,5,1733268390250.5876583ce76c99709588d8bf18d29689.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1733268390653"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733268390653"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733268390653"}]},"ts":"1733268390653"} 2024-12-03T23:26:30,665 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized 
subprocedures=[{pid=24, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=a63704f9ae4639fb39500e4e4c1c926b, ASSIGN}, {pid=25, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=5876583ce76c99709588d8bf18d29689, ASSIGN}] 2024-12-03T23:26:30,666 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=25, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=5876583ce76c99709588d8bf18d29689, ASSIGN 2024-12-03T23:26:30,666 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=24, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=a63704f9ae4639fb39500e4e4c1c926b, ASSIGN 2024-12-03T23:26:30,667 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=24, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=a63704f9ae4639fb39500e4e4c1c926b, ASSIGN; state=SPLITTING_NEW, location=ed8014902436,41121,1733268370525; forceNewPlan=false, retain=false 2024-12-03T23:26:30,667 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=25, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=5876583ce76c99709588d8bf18d29689, ASSIGN; state=SPLITTING_NEW, location=ed8014902436,41121,1733268370525; forceNewPlan=false, retain=false 2024-12-03T23:26:30,818 INFO [ed8014902436:43753 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-03T23:26:30,819 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=24 updating hbase:meta row=a63704f9ae4639fb39500e4e4c1c926b, regionState=OPENING, regionLocation=ed8014902436,41121,1733268370525 2024-12-03T23:26:30,819 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=25 updating hbase:meta row=5876583ce76c99709588d8bf18d29689, regionState=OPENING, regionLocation=ed8014902436,41121,1733268370525 2024-12-03T23:26:30,824 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=25, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=5876583ce76c99709588d8bf18d29689, ASSIGN because future has completed 2024-12-03T23:26:30,825 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=26, ppid=25, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5876583ce76c99709588d8bf18d29689, server=ed8014902436,41121,1733268370525}] 2024-12-03T23:26:30,827 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=24, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=a63704f9ae4639fb39500e4e4c1c926b, ASSIGN because future has completed 2024-12-03T23:26:30,827 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=27, ppid=24, state=RUNNABLE, hasLock=false; OpenRegionProcedure a63704f9ae4639fb39500e4e4c1c926b, server=ed8014902436,41121,1733268370525}] 2024-12-03T23:26:30,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-12-03T23:26:30,984 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] handler.AssignRegionHandler(132): Open testExportFileSystemStateWithSplitRegion,,1733268390250.a63704f9ae4639fb39500e4e4c1c926b. 2024-12-03T23:26:30,984 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(7752): Opening region: {ENCODED => a63704f9ae4639fb39500e4e4c1c926b, NAME => 'testExportFileSystemStateWithSplitRegion,,1733268390250.a63704f9ae4639fb39500e4e4c1c926b.', STARTKEY => '', ENDKEY => '5'} 2024-12-03T23:26:30,985 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportFileSystemStateWithSplitRegion,,1733268390250.a63704f9ae4639fb39500e4e4c1c926b. service=AccessControlService 2024-12-03T23:26:30,985 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-03T23:26:30,985 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportFileSystemStateWithSplitRegion a63704f9ae4639fb39500e4e4c1c926b 2024-12-03T23:26:30,985 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(898): Instantiated testExportFileSystemStateWithSplitRegion,,1733268390250.a63704f9ae4639fb39500e4e4c1c926b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T23:26:30,986 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(7794): checking encryption for a63704f9ae4639fb39500e4e4c1c926b 2024-12-03T23:26:30,986 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(7797): checking classloading for a63704f9ae4639fb39500e4e4c1c926b 2024-12-03T23:26:30,988 INFO [StoreOpener-a63704f9ae4639fb39500e4e4c1c926b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region a63704f9ae4639fb39500e4e4c1c926b 2024-12-03T23:26:30,989 INFO [StoreOpener-a63704f9ae4639fb39500e4e4c1c926b-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a63704f9ae4639fb39500e4e4c1c926b columnFamilyName cf 2024-12-03T23:26:30,989 DEBUG [StoreOpener-a63704f9ae4639fb39500e4e4c1c926b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:26:31,005 DEBUG [StoreFileOpener-a63704f9ae4639fb39500e4e4c1c926b-cf-1 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 463304943af74b0885f87a458aa43cfa_SeqId_4_.54136d4ef8de85437ff664f333348f85: NONE, but ROW specified in column family configuration 2024-12-03T23:26:31,026 DEBUG [StoreOpener-a63704f9ae4639fb39500e4e4c1c926b-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportFileSystemStateWithSplitRegion/a63704f9ae4639fb39500e4e4c1c926b/cf/463304943af74b0885f87a458aa43cfa_SeqId_4_.54136d4ef8de85437ff664f333348f85->hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportFileSystemStateWithSplitRegion/54136d4ef8de85437ff664f333348f85/cf/463304943af74b0885f87a458aa43cfa_SeqId_4_-bottom 2024-12-03T23:26:31,027 INFO [StoreOpener-a63704f9ae4639fb39500e4e4c1c926b-1 {}] regionserver.HStore(327): Store=a63704f9ae4639fb39500e4e4c1c926b/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T23:26:31,027 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1038): replaying wal for a63704f9ae4639fb39500e4e4c1c926b 2024-12-03T23:26:31,028 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportFileSystemStateWithSplitRegion/a63704f9ae4639fb39500e4e4c1c926b 2024-12-03T23:26:31,031 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportFileSystemStateWithSplitRegion/a63704f9ae4639fb39500e4e4c1c926b 2024-12-03T23:26:31,031 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1048): stopping wal replay for a63704f9ae4639fb39500e4e4c1c926b 2024-12-03T23:26:31,031 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1060): Cleaning up temporary data for a63704f9ae4639fb39500e4e4c1c926b 2024-12-03T23:26:31,034 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1093): writing seq id for a63704f9ae4639fb39500e4e4c1c926b 2024-12-03T23:26:31,036 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1114): Opened a63704f9ae4639fb39500e4e4c1c926b; next sequenceid=7; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70213594, jitterRate=0.046264082193374634}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-03T23:26:31,036 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1122): Running coprocessor post-open hooks for a63704f9ae4639fb39500e4e4c1c926b 2024-12-03T23:26:31,037 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1006): Region open journal for a63704f9ae4639fb39500e4e4c1c926b: Running coprocessor pre-open hook at 1733268390986Writing region info on filesystem at 1733268390986Initializing all the Stores at 1733268390987 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733268390987Cleaning up temporary data from old regions at 1733268391032 (+45 ms)Running coprocessor post-open hooks at 1733268391037 (+5 ms)Region opened successfully at 1733268391037 2024-12-03T23:26:31,039 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportFileSystemStateWithSplitRegion,,1733268390250.a63704f9ae4639fb39500e4e4c1c926b., pid=27, masterSystemTime=1733268390978 2024-12-03T23:26:31,040 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.CompactSplit(342): 
Ignoring compaction request for testExportFileSystemStateWithSplitRegion,,1733268390250.a63704f9ae4639fb39500e4e4c1c926b.,because compaction is disabled. 2024-12-03T23:26:31,045 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportFileSystemStateWithSplitRegion,,1733268390250.a63704f9ae4639fb39500e4e4c1c926b. 2024-12-03T23:26:31,045 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] handler.AssignRegionHandler(153): Opened testExportFileSystemStateWithSplitRegion,,1733268390250.a63704f9ae4639fb39500e4e4c1c926b. 2024-12-03T23:26:31,045 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] handler.AssignRegionHandler(132): Open testExportFileSystemStateWithSplitRegion,5,1733268390250.5876583ce76c99709588d8bf18d29689. 2024-12-03T23:26:31,046 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(7752): Opening region: {ENCODED => 5876583ce76c99709588d8bf18d29689, NAME => 'testExportFileSystemStateWithSplitRegion,5,1733268390250.5876583ce76c99709588d8bf18d29689.', STARTKEY => '5', ENDKEY => ''} 2024-12-03T23:26:31,046 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=24 updating hbase:meta row=a63704f9ae4639fb39500e4e4c1c926b, regionState=OPEN, openSeqNum=7, regionLocation=ed8014902436,41121,1733268370525 2024-12-03T23:26:31,046 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportFileSystemStateWithSplitRegion,5,1733268390250.5876583ce76c99709588d8bf18d29689. service=AccessControlService 2024-12-03T23:26:31,046 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-03T23:26:31,046 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportFileSystemStateWithSplitRegion 5876583ce76c99709588d8bf18d29689 2024-12-03T23:26:31,047 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(898): Instantiated testExportFileSystemStateWithSplitRegion,5,1733268390250.5876583ce76c99709588d8bf18d29689.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T23:26:31,047 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(7794): checking encryption for 5876583ce76c99709588d8bf18d29689 2024-12-03T23:26:31,047 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(7797): checking classloading for 5876583ce76c99709588d8bf18d29689 2024-12-03T23:26:31,048 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43753 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=OPEN, location=ed8014902436,41121,1733268370525, table=testExportFileSystemStateWithSplitRegion, region=a63704f9ae4639fb39500e4e4c1c926b. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 
2024-12-03T23:26:31,051 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=27, ppid=24, state=RUNNABLE, hasLock=false; OpenRegionProcedure a63704f9ae4639fb39500e4e4c1c926b, server=ed8014902436,41121,1733268370525 because future has completed 2024-12-03T23:26:31,051 INFO [StoreOpener-5876583ce76c99709588d8bf18d29689-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 5876583ce76c99709588d8bf18d29689 2024-12-03T23:26:31,053 INFO [StoreOpener-5876583ce76c99709588d8bf18d29689-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5876583ce76c99709588d8bf18d29689 columnFamilyName cf 2024-12-03T23:26:31,054 DEBUG [StoreOpener-5876583ce76c99709588d8bf18d29689-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:26:31,056 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=27, resume processing ppid=24 2024-12-03T23:26:31,057 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=27, ppid=24, state=SUCCESS, hasLock=false; OpenRegionProcedure a63704f9ae4639fb39500e4e4c1c926b, server=ed8014902436,41121,1733268370525 in 225 msec 2024-12-03T23:26:31,061 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=24, ppid=21, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=a63704f9ae4639fb39500e4e4c1c926b, ASSIGN in 392 msec 2024-12-03T23:26:31,067 DEBUG [StoreFileOpener-5876583ce76c99709588d8bf18d29689-cf-1 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 463304943af74b0885f87a458aa43cfa_SeqId_4_.54136d4ef8de85437ff664f333348f85: NONE, but ROW specified in column family configuration 2024-12-03T23:26:31,069 DEBUG [StoreOpener-5876583ce76c99709588d8bf18d29689-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportFileSystemStateWithSplitRegion/5876583ce76c99709588d8bf18d29689/cf/463304943af74b0885f87a458aa43cfa_SeqId_4_.54136d4ef8de85437ff664f333348f85->hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportFileSystemStateWithSplitRegion/54136d4ef8de85437ff664f333348f85/cf/463304943af74b0885f87a458aa43cfa_SeqId_4_-top 2024-12-03T23:26:31,070 INFO [StoreOpener-5876583ce76c99709588d8bf18d29689-1 {}] regionserver.HStore(327): Store=5876583ce76c99709588d8bf18d29689/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T23:26:31,070 
DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1038): replaying wal for 5876583ce76c99709588d8bf18d29689 2024-12-03T23:26:31,071 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportFileSystemStateWithSplitRegion/5876583ce76c99709588d8bf18d29689 2024-12-03T23:26:31,072 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportFileSystemStateWithSplitRegion/5876583ce76c99709588d8bf18d29689 2024-12-03T23:26:31,073 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1048): stopping wal replay for 5876583ce76c99709588d8bf18d29689 2024-12-03T23:26:31,073 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1060): Cleaning up temporary data for 5876583ce76c99709588d8bf18d29689 2024-12-03T23:26:31,075 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1093): writing seq id for 5876583ce76c99709588d8bf18d29689 2024-12-03T23:26:31,077 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1114): Opened 5876583ce76c99709588d8bf18d29689; next sequenceid=7; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73538632, jitterRate=0.09581100940704346}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-03T23:26:31,077 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 5876583ce76c99709588d8bf18d29689 2024-12-03T23:26:31,077 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1006): Region open journal for 5876583ce76c99709588d8bf18d29689: Running coprocessor pre-open hook at 1733268391047Writing region info on filesystem at 1733268391047Initializing all the Stores at 1733268391050 (+3 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733268391050Cleaning up temporary data from old regions at 1733268391073 (+23 ms)Running coprocessor post-open hooks at 1733268391077 (+4 ms)Region opened successfully at 1733268391077 2024-12-03T23:26:31,078 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportFileSystemStateWithSplitRegion,5,1733268390250.5876583ce76c99709588d8bf18d29689., pid=26, masterSystemTime=1733268390978 2024-12-03T23:26:31,078 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.CompactSplit(342): Ignoring compaction request for 
testExportFileSystemStateWithSplitRegion,5,1733268390250.5876583ce76c99709588d8bf18d29689.,because compaction is disabled. 2024-12-03T23:26:31,081 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportFileSystemStateWithSplitRegion,5,1733268390250.5876583ce76c99709588d8bf18d29689. 2024-12-03T23:26:31,081 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] handler.AssignRegionHandler(153): Opened testExportFileSystemStateWithSplitRegion,5,1733268390250.5876583ce76c99709588d8bf18d29689. 2024-12-03T23:26:31,082 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=25 updating hbase:meta row=5876583ce76c99709588d8bf18d29689, regionState=OPEN, openSeqNum=7, regionLocation=ed8014902436,41121,1733268370525 2024-12-03T23:26:31,085 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=26, ppid=25, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5876583ce76c99709588d8bf18d29689, server=ed8014902436,41121,1733268370525 because future has completed 2024-12-03T23:26:31,090 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=26, resume processing ppid=25 2024-12-03T23:26:31,092 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=26, ppid=25, state=SUCCESS, hasLock=false; OpenRegionProcedure 5876583ce76c99709588d8bf18d29689, server=ed8014902436,41121,1733268370525 in 262 msec 2024-12-03T23:26:31,093 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=25, resume processing ppid=21 2024-12-03T23:26:31,094 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=25, ppid=21, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=5876583ce76c99709588d8bf18d29689, ASSIGN in 425 msec 2024-12-03T23:26:31,096 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=21, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=54136d4ef8de85437ff664f333348f85, daughterA=a63704f9ae4639fb39500e4e4c1c926b, daughterB=5876583ce76c99709588d8bf18d29689 in 843 msec 2024-12-03T23:26:31,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-12-03T23:26:31,400 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SPLIT_REGION, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-12-03T23:26:31,401 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportFileSystemStateWithSplitRegion,, stopping at row=testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-03T23:26:31,409 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-03T23:26:31,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733268391409 (current time:1733268391409). 
2024-12-03T23:26:31,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-03T23:26:31,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snapshot-testExportFileSystemStateWithSplitRegion VERSION not specified, setting to 2 2024-12-03T23:26:31,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-03T23:26:31,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@74483c2f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:26:31,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] client.ClusterIdFetcher(90): Going to request ed8014902436,43753,-1 for getting cluster id 2024-12-03T23:26:31,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T23:26:31,412 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '365bf15c-9cbe-4838-a767-ac7274dcdbe4' 2024-12-03T23:26:31,413 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T23:26:31,413 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "365bf15c-9cbe-4838-a767-ac7274dcdbe4" 2024-12-03T23:26:31,413 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2d771a6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:26:31,413 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [ed8014902436,43753,-1] 2024-12-03T23:26:31,414 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T23:26:31,414 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:26:31,415 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48592, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T23:26:31,416 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c4258c5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:26:31,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T23:26:31,417 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ed8014902436,37351,1733268370600, seqNum=-1] 2024-12-03T23:26:31,417 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T23:26:31,418 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36212, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T23:26:31,419 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753. 2024-12-03T23:26:31,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-03T23:26:31,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:26:31,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:26:31,419 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-03T23:26:31,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@a373b00, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:26:31,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] client.ClusterIdFetcher(90): Going to request ed8014902436,43753,-1 for getting cluster id 2024-12-03T23:26:31,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T23:26:31,421 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '365bf15c-9cbe-4838-a767-ac7274dcdbe4' 2024-12-03T23:26:31,421 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T23:26:31,421 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "365bf15c-9cbe-4838-a767-ac7274dcdbe4" 2024-12-03T23:26:31,421 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@22a16022, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:26:31,421 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [ed8014902436,43753,-1] 2024-12-03T23:26:31,422 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T23:26:31,422 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:26:31,422 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48622, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T23:26:31,423 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5376b1b6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:26:31,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T23:26:31,424 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ed8014902436,37351,1733268370600, seqNum=-1] 2024-12-03T23:26:31,424 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T23:26:31,425 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36216, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-03T23:26:31,428 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1733268373530.94ece278d222af9bae70064026dc24ae., hostname=ed8014902436,37351,1733268370600, seqNum=2] 2024-12-03T23:26:31,430 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753. 2024-12-03T23:26:31,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-03T23:26:31,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:26:31,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:26:31,430 INFO [Registry-endpoints-refresh-end-points {}] 
client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-03T23:26:31,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] access.PermissionStorage(613): Read acl: entry[testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-12-03T23:26:31,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-03T23:26:31,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] procedure2.ProcedureExecutor(1139): Stored pid=28, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-03T23:26:31,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 28 2024-12-03T23:26:31,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=28 2024-12-03T23:26:31,435 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-03T23:26:31,438 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-03T23:26:31,441 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-03T23:26:31,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741868_1044 (size=197) 2024-12-03T23:26:31,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741868_1044 (size=197) 2024-12-03T23:26:31,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741868_1044 (size=197) 2024-12-03T23:26:31,451 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-03T23:26:31,451 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=29, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 
a63704f9ae4639fb39500e4e4c1c926b}, {pid=30, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5876583ce76c99709588d8bf18d29689}] 2024-12-03T23:26:31,452 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=30, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5876583ce76c99709588d8bf18d29689 2024-12-03T23:26:31,452 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=29, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a63704f9ae4639fb39500e4e4c1c926b 2024-12-03T23:26:31,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=28 2024-12-03T23:26:31,605 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41121 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=29 2024-12-03T23:26:31,606 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41121 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=30 2024-12-03T23:26:31,606 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportFileSystemStateWithSplitRegion,,1733268390250.a63704f9ae4639fb39500e4e4c1c926b. 2024-12-03T23:26:31,606 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportFileSystemStateWithSplitRegion,5,1733268390250.5876583ce76c99709588d8bf18d29689. 2024-12-03T23:26:31,607 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.HRegion(2603): Flush status journal for a63704f9ae4639fb39500e4e4c1c926b: 2024-12-03T23:26:31,607 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportFileSystemStateWithSplitRegion,,1733268390250.a63704f9ae4639fb39500e4e4c1c926b. for snapshot-testExportFileSystemStateWithSplitRegion completed. 2024-12-03T23:26:31,607 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.HRegion(2603): Flush status journal for 5876583ce76c99709588d8bf18d29689: 2024-12-03T23:26:31,607 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportFileSystemStateWithSplitRegion,5,1733268390250.5876583ce76c99709588d8bf18d29689. for snapshot-testExportFileSystemStateWithSplitRegion completed. 2024-12-03T23:26:31,607 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] snapshot.SnapshotManifest(241): Storing 'testExportFileSystemStateWithSplitRegion,,1733268390250.a63704f9ae4639fb39500e4e4c1c926b.' region-info for snapshot=snapshot-testExportFileSystemStateWithSplitRegion 2024-12-03T23:26:31,607 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] snapshot.SnapshotManifest(241): Storing 'testExportFileSystemStateWithSplitRegion,5,1733268390250.5876583ce76c99709588d8bf18d29689.' 
region-info for snapshot=snapshot-testExportFileSystemStateWithSplitRegion 2024-12-03T23:26:31,607 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-03T23:26:31,607 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-03T23:26:31,608 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportFileSystemStateWithSplitRegion/a63704f9ae4639fb39500e4e4c1c926b/cf/463304943af74b0885f87a458aa43cfa_SeqId_4_.54136d4ef8de85437ff664f333348f85->hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportFileSystemStateWithSplitRegion/54136d4ef8de85437ff664f333348f85/cf/463304943af74b0885f87a458aa43cfa_SeqId_4_-bottom] hfiles 2024-12-03T23:26:31,608 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportFileSystemStateWithSplitRegion/5876583ce76c99709588d8bf18d29689/cf/463304943af74b0885f87a458aa43cfa_SeqId_4_.54136d4ef8de85437ff664f333348f85->hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportFileSystemStateWithSplitRegion/54136d4ef8de85437ff664f333348f85/cf/463304943af74b0885f87a458aa43cfa_SeqId_4_-top] hfiles 2024-12-03T23:26:31,608 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportFileSystemStateWithSplitRegion/a63704f9ae4639fb39500e4e4c1c926b/cf/463304943af74b0885f87a458aa43cfa_SeqId_4_.54136d4ef8de85437ff664f333348f85 for snapshot=snapshot-testExportFileSystemStateWithSplitRegion 2024-12-03T23:26:31,608 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportFileSystemStateWithSplitRegion/5876583ce76c99709588d8bf18d29689/cf/463304943af74b0885f87a458aa43cfa_SeqId_4_.54136d4ef8de85437ff664f333348f85 for snapshot=snapshot-testExportFileSystemStateWithSplitRegion 2024-12-03T23:26:31,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741869_1045 (size=182) 2024-12-03T23:26:31,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741869_1045 (size=182) 2024-12-03T23:26:31,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741870_1046 (size=182) 2024-12-03T23:26:31,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741870_1046 (size=182) 2024-12-03T23:26:31,624 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741869_1045 (size=182) 2024-12-03T23:26:31,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741870_1046 (size=182) 2024-12-03T23:26:31,624 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportFileSystemStateWithSplitRegion,5,1733268390250.5876583ce76c99709588d8bf18d29689. 2024-12-03T23:26:31,625 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportFileSystemStateWithSplitRegion,,1733268390250.a63704f9ae4639fb39500e4e4c1c926b. 2024-12-03T23:26:31,625 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=30 2024-12-03T23:26:31,625 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=29 2024-12-03T23:26:31,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.HMaster(4169): Remote procedure done, pid=30 2024-12-03T23:26:31,625 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportFileSystemStateWithSplitRegion on region 5876583ce76c99709588d8bf18d29689 2024-12-03T23:26:31,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.HMaster(4169): Remote procedure done, pid=29 2024-12-03T23:26:31,625 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportFileSystemStateWithSplitRegion on region a63704f9ae4639fb39500e4e4c1c926b 2024-12-03T23:26:31,625 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=30, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5876583ce76c99709588d8bf18d29689 2024-12-03T23:26:31,626 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=29, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a63704f9ae4639fb39500e4e4c1c926b 2024-12-03T23:26:31,628 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=30, ppid=28, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 5876583ce76c99709588d8bf18d29689 in 176 msec 2024-12-03T23:26:31,629 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=29, resume processing ppid=28 2024-12-03T23:26:31,630 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=29, ppid=28, state=SUCCESS, hasLock=false; SnapshotRegionProcedure a63704f9ae4639fb39500e4e4c1c926b in 176 msec 2024-12-03T23:26:31,630 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-03T23:26:31,631 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-03T23:26:31,631 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-03T23:26:31,631 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:26:31,632 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportFileSystemStateWithSplitRegion/54136d4ef8de85437ff664f333348f85/cf/463304943af74b0885f87a458aa43cfa_SeqId_4_] hfiles 2024-12-03T23:26:31,632 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/1): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportFileSystemStateWithSplitRegion/54136d4ef8de85437ff664f333348f85/cf/463304943af74b0885f87a458aa43cfa_SeqId_4_ 2024-12-03T23:26:31,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741871_1047 (size=129) 2024-12-03T23:26:31,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741871_1047 (size=129) 2024-12-03T23:26:31,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741871_1047 (size=129) 2024-12-03T23:26:31,640 INFO [SplitRegionsSnapshotPool-pool-0 {}] procedure.SnapshotProcedure$1(378): take snapshot region={ENCODED => 54136d4ef8de85437ff664f333348f85, NAME => 'testExportFileSystemStateWithSplitRegion,,1733268382171.54136d4ef8de85437ff664f333348f85.', STARTKEY => '', ENDKEY => '', OFFLINE => true, SPLIT => true}, table=testExportFileSystemStateWithSplitRegion 2024-12-03T23:26:31,641 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-03T23:26:31,642 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-03T23:26:31,642 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snapshot-testExportFileSystemStateWithSplitRegion 2024-12-03T23:26:31,643 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/.tmp/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-03T23:26:31,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741872_1048 (size=891) 2024-12-03T23:26:31,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741872_1048 (size=891) 2024-12-03T23:26:31,658 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741872_1048 (size=891) 2024-12-03T23:26:31,663 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-03T23:26:31,670 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-03T23:26:31,671 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/.tmp/snapshot-testExportFileSystemStateWithSplitRegion to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-03T23:26:31,673 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-03T23:26:31,674 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 28 2024-12-03T23:26:31,676 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=28, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } in 242 msec 2024-12-03T23:26:31,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=28 2024-12-03T23:26:31,750 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-12-03T23:26:31,750 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268391750 2024-12-03T23:26:31,750 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:37331, tgtDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268391750, rawTgtDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268391750, srcFsUri=hdfs://localhost:37331, srcDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b 2024-12-03T23:26:31,785 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:37331, 
inputRoot=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b 2024-12-03T23:26:31,785 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-919726820_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268391750, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268391750/.hbase-snapshot/.tmp/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-03T23:26:31,789 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-03T23:26:31,795 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268391750/.hbase-snapshot/.tmp/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-03T23:26:31,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741873_1049 (size=197) 2024-12-03T23:26:31,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741874_1050 (size=891) 2024-12-03T23:26:31,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741873_1049 (size=197) 2024-12-03T23:26:31,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741874_1050 (size=891) 2024-12-03T23:26:31,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741874_1050 (size=891) 2024-12-03T23:26:31,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741873_1049 (size=197) 2024-12-03T23:26:31,958 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:26:31,959 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:26:31,959 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:26:32,815 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/hadoop-16534846787891742303.jar 2024-12-03T23:26:32,816 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:26:32,816 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:26:32,896 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/hadoop-15647317550979288256.jar 2024-12-03T23:26:32,897 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:26:32,897 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:26:32,898 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:26:32,898 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:26:32,899 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:26:32,899 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:26:32,900 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-03T23:26:32,900 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-03T23:26:32,900 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-03T23:26:32,901 DEBUG [Time-limited 
test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-03T23:26:32,901 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-03T23:26:32,901 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-03T23:26:32,902 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-03T23:26:32,902 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-03T23:26:32,902 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-03T23:26:32,903 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-03T23:26:32,903 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-03T23:26:32,905 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-03T23:26:32,905 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-03T23:26:32,906 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-03T23:26:32,906 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 
2024-12-03T23:26:32,906 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-03T23:26:32,907 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-03T23:26:32,907 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-03T23:26:33,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741875_1051 (size=131440) 2024-12-03T23:26:33,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741875_1051 (size=131440) 2024-12-03T23:26:33,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741875_1051 (size=131440) 2024-12-03T23:26:33,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741876_1052 (size=4188619) 2024-12-03T23:26:33,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741876_1052 (size=4188619) 2024-12-03T23:26:33,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741876_1052 (size=4188619) 2024-12-03T23:26:33,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741877_1053 (size=1323991) 2024-12-03T23:26:33,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741877_1053 (size=1323991) 2024-12-03T23:26:33,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741877_1053 (size=1323991) 2024-12-03T23:26:33,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741878_1054 (size=903924) 2024-12-03T23:26:33,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741878_1054 (size=903924) 2024-12-03T23:26:33,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741878_1054 (size=903924) 2024-12-03T23:26:33,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741879_1055 (size=8360360) 2024-12-03T23:26:33,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741879_1055 (size=8360360) 2024-12-03T23:26:33,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:37061 is added to blk_1073741879_1055 (size=8360360) 2024-12-03T23:26:33,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741880_1056 (size=1877034) 2024-12-03T23:26:33,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741880_1056 (size=1877034) 2024-12-03T23:26:33,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741880_1056 (size=1877034) 2024-12-03T23:26:33,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741881_1057 (size=77835) 2024-12-03T23:26:33,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741881_1057 (size=77835) 2024-12-03T23:26:33,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741881_1057 (size=77835) 2024-12-03T23:26:33,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741882_1058 (size=30949) 2024-12-03T23:26:33,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741882_1058 (size=30949) 2024-12-03T23:26:33,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741882_1058 (size=30949) 2024-12-03T23:26:33,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741883_1059 (size=1597213) 2024-12-03T23:26:33,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741883_1059 (size=1597213) 2024-12-03T23:26:33,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741883_1059 (size=1597213) 2024-12-03T23:26:33,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741884_1060 (size=4695811) 2024-12-03T23:26:33,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741884_1060 (size=4695811) 2024-12-03T23:26:33,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741884_1060 (size=4695811) 2024-12-03T23:26:33,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741885_1061 (size=232957) 2024-12-03T23:26:33,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741885_1061 (size=232957) 2024-12-03T23:26:33,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741885_1061 (size=232957) 2024-12-03T23:26:33,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741886_1062 (size=127628) 2024-12-03T23:26:33,491 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741886_1062 (size=127628) 2024-12-03T23:26:33,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741886_1062 (size=127628) 2024-12-03T23:26:33,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741887_1063 (size=20406) 2024-12-03T23:26:33,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741887_1063 (size=20406) 2024-12-03T23:26:33,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741887_1063 (size=20406) 2024-12-03T23:26:33,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741888_1064 (size=5175431) 2024-12-03T23:26:33,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741888_1064 (size=5175431) 2024-12-03T23:26:33,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741888_1064 (size=5175431) 2024-12-03T23:26:33,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741889_1065 (size=217634) 2024-12-03T23:26:33,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741889_1065 (size=217634) 2024-12-03T23:26:33,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741889_1065 (size=217634) 2024-12-03T23:26:33,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741890_1066 (size=1832290) 2024-12-03T23:26:33,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741890_1066 (size=1832290) 2024-12-03T23:26:33,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741890_1066 (size=1832290) 2024-12-03T23:26:33,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741891_1067 (size=322274) 2024-12-03T23:26:33,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741891_1067 (size=322274) 2024-12-03T23:26:33,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741891_1067 (size=322274) 2024-12-03T23:26:33,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741892_1068 (size=503880) 2024-12-03T23:26:33,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741892_1068 (size=503880) 2024-12-03T23:26:33,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741892_1068 (size=503880) 2024-12-03T23:26:33,640 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741893_1069 (size=443172) 2024-12-03T23:26:33,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741893_1069 (size=443172) 2024-12-03T23:26:33,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741893_1069 (size=443172) 2024-12-03T23:26:33,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741894_1070 (size=29229) 2024-12-03T23:26:33,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741894_1070 (size=29229) 2024-12-03T23:26:33,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741894_1070 (size=29229) 2024-12-03T23:26:33,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741895_1071 (size=24096) 2024-12-03T23:26:33,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741895_1071 (size=24096) 2024-12-03T23:26:33,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741895_1071 (size=24096) 2024-12-03T23:26:33,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741896_1072 (size=111872) 2024-12-03T23:26:33,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741896_1072 (size=111872) 2024-12-03T23:26:33,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741896_1072 (size=111872) 2024-12-03T23:26:33,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741897_1073 (size=6424741) 2024-12-03T23:26:33,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741897_1073 (size=6424741) 2024-12-03T23:26:33,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741897_1073 (size=6424741) 2024-12-03T23:26:33,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741898_1074 (size=45609) 2024-12-03T23:26:33,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741898_1074 (size=45609) 2024-12-03T23:26:33,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741898_1074 (size=45609) 2024-12-03T23:26:33,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741899_1075 (size=136454) 2024-12-03T23:26:33,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741899_1075 (size=136454) 2024-12-03T23:26:33,714 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741899_1075 (size=136454) 2024-12-03T23:26:33,716 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-03T23:26:33,720 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snapshot-testExportFileSystemStateWithSplitRegion' hfile list 2024-12-03T23:26:33,727 DEBUG [Time-limited test {}] snapshot.ExportSnapshot$1(689): Skip the existing file: cf/testExportFileSystemStateWithSplitRegion=54136d4ef8de85437ff664f333348f85-463304943af74b0885f87a458aa43cfa_SeqId_4_. 2024-12-03T23:26:33,727 DEBUG [Time-limited test {}] snapshot.ExportSnapshot$1(689): Skip the existing file: cf/testExportFileSystemStateWithSplitRegion=54136d4ef8de85437ff664f333348f85-463304943af74b0885f87a458aa43cfa_SeqId_4_. 2024-12-03T23:26:33,728 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=305.6 M 2024-12-03T23:26:33,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741900_1076 (size=244) 2024-12-03T23:26:33,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741900_1076 (size=244) 2024-12-03T23:26:33,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741900_1076 (size=244) 2024-12-03T23:26:33,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741901_1077 (size=17) 2024-12-03T23:26:33,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741901_1077 (size=17) 2024-12-03T23:26:33,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741901_1077 (size=17) 2024-12-03T23:26:33,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741902_1078 (size=304054) 2024-12-03T23:26:33,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741902_1078 (size=304054) 2024-12-03T23:26:33,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741902_1078 (size=304054) 2024-12-03T23:26:34,149 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-03T23:26:34,150 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-03T23:26:34,250 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0001_000001 (auth:SIMPLE) from 127.0.0.1:43902 2024-12-03T23:26:36,001 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-03T23:26:38,447 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-03T23:26:40,204 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0001_000001 (auth:SIMPLE) from 127.0.0.1:44684 2024-12-03T23:26:40,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741903_1079 (size=349752) 2024-12-03T23:26:40,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741903_1079 (size=349752) 2024-12-03T23:26:40,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741903_1079 (size=349752) 2024-12-03T23:26:42,434 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0001_000001 (auth:SIMPLE) from 127.0.0.1:58900 2024-12-03T23:26:54,955 INFO [master/ed8014902436:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-03T23:26:54,956 INFO [master/ed8014902436:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-03T23:27:04,744 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 0a946bfb1db0f3ada4bf11d8c357faa1, had cached 0 bytes from a total of 5754 2024-12-03T23:27:04,782 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region e9d01e47291deb97fed4270eaf373804, had cached 0 bytes from a total of 15937 2024-12-03T23:27:08,447 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-03T23:27:14,522 DEBUG [master/ed8014902436:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 94ece278d222af9bae70064026dc24ae changed from -1.0 to 0.0, refreshing cache 2024-12-03T23:27:14,522 DEBUG [master/ed8014902436:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 0a946bfb1db0f3ada4bf11d8c357faa1 changed from -1.0 to 0.0, refreshing cache 2024-12-03T23:27:14,523 DEBUG [master/ed8014902436:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region e9d01e47291deb97fed4270eaf373804 changed from -1.0 to 0.0, refreshing cache 2024-12-03T23:27:15,986 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region a63704f9ae4639fb39500e4e4c1c926b, had cached 0 bytes from a total of 320414712 2024-12-03T23:27:16,047 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 5876583ce76c99709588d8bf18d29689, had cached 0 bytes from a total of 320414712 2024-12-03T23:27:17,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741904_1080 (size=134217728) 2024-12-03T23:27:17,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741904_1080 (size=134217728) 2024-12-03T23:27:17,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741904_1080 (size=134217728) 2024-12-03T23:27:38,447 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-03T23:27:46,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741905_1081 (size=134217728) 2024-12-03T23:27:46,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741905_1081 (size=134217728) 2024-12-03T23:27:46,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741905_1081 (size=134217728) 2024-12-03T23:27:49,745 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 0a946bfb1db0f3ada4bf11d8c357faa1, had cached 0 bytes from a total of 5754 2024-12-03T23:27:49,782 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region e9d01e47291deb97fed4270eaf373804, had cached 0 bytes from a total of 15937 2024-12-03T23:27:58,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741906_1082 (size=51979256) 2024-12-03T23:27:58,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741906_1082 (size=51979256) 2024-12-03T23:27:58,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741906_1082 (size=51979256) 2024-12-03T23:27:58,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741907_1083 (size=17509) 2024-12-03T23:27:58,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741907_1083 (size=17509) 2024-12-03T23:27:58,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741907_1083 (size=17509) 2024-12-03T23:27:58,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741908_1084 (size=482) 2024-12-03T23:27:58,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741908_1084 (size=482) 2024-12-03T23:27:58,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741908_1084 (size=482) 2024-12-03T23:27:58,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741909_1085 (size=17509) 2024-12-03T23:27:58,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741909_1085 (size=17509) 2024-12-03T23:27:58,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741909_1085 (size=17509) 2024-12-03T23:27:58,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741910_1086 (size=349752) 2024-12-03T23:27:58,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741910_1086 (size=349752) 2024-12-03T23:27:58,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:37061 is added to blk_1073741910_1086 (size=349752) 2024-12-03T23:27:58,806 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0001_000001 (auth:SIMPLE) from 127.0.0.1:56454 2024-12-03T23:27:59,003 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_1/usercache/jenkins/appcache/application_1733268377251_0001/container_1733268377251_0001_01_000002/launch_container.sh] 2024-12-03T23:27:59,003 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_1/usercache/jenkins/appcache/application_1733268377251_0001/container_1733268377251_0001_01_000002/container_tokens] 2024-12-03T23:27:59,003 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_1/usercache/jenkins/appcache/application_1733268377251_0001/container_1733268377251_0001_01_000002/sysfs] 2024-12-03T23:28:00,821 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-03T23:28:00,823 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-12-03T23:28:00,838 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snapshot-testExportFileSystemStateWithSplitRegion 2024-12-03T23:28:00,838 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-03T23:28:00,839 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-03T23:28:00,840 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-919726820_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion at hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-03T23:28:00,841 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion/.snapshotinfo 2024-12-03T23:28:00,841 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion/data.manifest 2024-12-03T23:28:00,841 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-919726820_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268391750/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion at 
hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268391750/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-03T23:28:00,842 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268391750/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion/.snapshotinfo 2024-12-03T23:28:00,842 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268391750/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion/data.manifest 2024-12-03T23:28:00,867 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testExportFileSystemStateWithSplitRegion 2024-12-03T23:28:00,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] procedure2.ProcedureExecutor(1139): Stored pid=31, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-03T23:28:00,893 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733268480893"}]},"ts":"1733268480893"} 2024-12-03T23:28:00,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=31 2024-12-03T23:28:00,896 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportFileSystemStateWithSplitRegion, state=DISABLING in hbase:meta 2024-12-03T23:28:00,896 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testExportFileSystemStateWithSplitRegion to state=DISABLING 2024-12-03T23:28:00,898 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=32, ppid=31, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testExportFileSystemStateWithSplitRegion}] 2024-12-03T23:28:00,903 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=a63704f9ae4639fb39500e4e4c1c926b, UNASSIGN}, {pid=34, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=5876583ce76c99709588d8bf18d29689, UNASSIGN}] 2024-12-03T23:28:00,904 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=34, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=5876583ce76c99709588d8bf18d29689, UNASSIGN 2024-12-03T23:28:00,904 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=33, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=a63704f9ae4639fb39500e4e4c1c926b, UNASSIGN 2024-12-03T23:28:00,905 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=33 updating hbase:meta row=a63704f9ae4639fb39500e4e4c1c926b, regionState=CLOSING, regionLocation=ed8014902436,41121,1733268370525 
2024-12-03T23:28:00,905 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=34 updating hbase:meta row=5876583ce76c99709588d8bf18d29689, regionState=CLOSING, regionLocation=ed8014902436,41121,1733268370525 2024-12-03T23:28:00,907 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=34, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=5876583ce76c99709588d8bf18d29689, UNASSIGN because future has completed 2024-12-03T23:28:00,907 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-03T23:28:00,907 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=35, ppid=34, state=RUNNABLE, hasLock=false; CloseRegionProcedure 5876583ce76c99709588d8bf18d29689, server=ed8014902436,41121,1733268370525}] 2024-12-03T23:28:00,908 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=33, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=a63704f9ae4639fb39500e4e4c1c926b, UNASSIGN because future has completed 2024-12-03T23:28:00,908 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-03T23:28:00,908 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=36, ppid=33, state=RUNNABLE, hasLock=false; CloseRegionProcedure a63704f9ae4639fb39500e4e4c1c926b, server=ed8014902436,41121,1733268370525}] 2024-12-03T23:28:00,986 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region a63704f9ae4639fb39500e4e4c1c926b, had cached 0 bytes from a total of 320414712 2024-12-03T23:28:00,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=31 2024-12-03T23:28:01,047 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 5876583ce76c99709588d8bf18d29689, had cached 0 bytes from a total of 320414712 2024-12-03T23:28:01,062 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] handler.UnassignRegionHandler(122): Close a63704f9ae4639fb39500e4e4c1c926b 2024-12-03T23:28:01,062 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-03T23:28:01,062 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1722): Closing a63704f9ae4639fb39500e4e4c1c926b, disabling compactions & flushes 2024-12-03T23:28:01,063 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1755): Closing region testExportFileSystemStateWithSplitRegion,,1733268390250.a63704f9ae4639fb39500e4e4c1c926b. 2024-12-03T23:28:01,063 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1776): Time limited wait for close lock on testExportFileSystemStateWithSplitRegion,,1733268390250.a63704f9ae4639fb39500e4e4c1c926b. 
2024-12-03T23:28:01,063 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1843): Acquired close lock on testExportFileSystemStateWithSplitRegion,,1733268390250.a63704f9ae4639fb39500e4e4c1c926b. after waiting 0 ms 2024-12-03T23:28:01,063 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1853): Updates disabled for region testExportFileSystemStateWithSplitRegion,,1733268390250.a63704f9ae4639fb39500e4e4c1c926b. 2024-12-03T23:28:01,082 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportFileSystemStateWithSplitRegion/a63704f9ae4639fb39500e4e4c1c926b/recovered.edits/10.seqid, newMaxSeqId=10, maxSeqId=6 2024-12-03T23:28:01,083 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-03T23:28:01,084 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1973): Closed testExportFileSystemStateWithSplitRegion,,1733268390250.a63704f9ae4639fb39500e4e4c1c926b. 2024-12-03T23:28:01,084 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1676): Region close journal for a63704f9ae4639fb39500e4e4c1c926b: Waiting for close lock at 1733268481062Running coprocessor pre-close hooks at 1733268481062Disabling compacts and flushes for region at 1733268481062Disabling writes for close at 1733268481063 (+1 ms)Writing region close event to WAL at 1733268481065 (+2 ms)Running coprocessor post-close hooks at 1733268481083 (+18 ms)Closed at 1733268481084 (+1 ms) 2024-12-03T23:28:01,087 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] handler.UnassignRegionHandler(157): Closed a63704f9ae4639fb39500e4e4c1c926b 2024-12-03T23:28:01,087 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(122): Close 5876583ce76c99709588d8bf18d29689 2024-12-03T23:28:01,087 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-03T23:28:01,088 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1722): Closing 5876583ce76c99709588d8bf18d29689, disabling compactions & flushes 2024-12-03T23:28:01,088 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1755): Closing region testExportFileSystemStateWithSplitRegion,5,1733268390250.5876583ce76c99709588d8bf18d29689. 2024-12-03T23:28:01,088 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1776): Time limited wait for close lock on testExportFileSystemStateWithSplitRegion,5,1733268390250.5876583ce76c99709588d8bf18d29689. 2024-12-03T23:28:01,088 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1843): Acquired close lock on testExportFileSystemStateWithSplitRegion,5,1733268390250.5876583ce76c99709588d8bf18d29689. 
after waiting 0 ms 2024-12-03T23:28:01,088 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1853): Updates disabled for region testExportFileSystemStateWithSplitRegion,5,1733268390250.5876583ce76c99709588d8bf18d29689. 2024-12-03T23:28:01,088 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=33 updating hbase:meta row=a63704f9ae4639fb39500e4e4c1c926b, regionState=CLOSED 2024-12-03T23:28:01,091 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=36, ppid=33, state=RUNNABLE, hasLock=false; CloseRegionProcedure a63704f9ae4639fb39500e4e4c1c926b, server=ed8014902436,41121,1733268370525 because future has completed 2024-12-03T23:28:01,093 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportFileSystemStateWithSplitRegion/5876583ce76c99709588d8bf18d29689/recovered.edits/10.seqid, newMaxSeqId=10, maxSeqId=6 2024-12-03T23:28:01,093 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-03T23:28:01,094 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1973): Closed testExportFileSystemStateWithSplitRegion,5,1733268390250.5876583ce76c99709588d8bf18d29689. 2024-12-03T23:28:01,094 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1676): Region close journal for 5876583ce76c99709588d8bf18d29689: Waiting for close lock at 1733268481087Running coprocessor pre-close hooks at 1733268481087Disabling compacts and flushes for region at 1733268481087Disabling writes for close at 1733268481088 (+1 ms)Writing region close event to WAL at 1733268481089 (+1 ms)Running coprocessor post-close hooks at 1733268481093 (+4 ms)Closed at 1733268481094 (+1 ms) 2024-12-03T23:28:01,095 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=36, resume processing ppid=33 2024-12-03T23:28:01,096 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=36, ppid=33, state=SUCCESS, hasLock=false; CloseRegionProcedure a63704f9ae4639fb39500e4e4c1c926b, server=ed8014902436,41121,1733268370525 in 184 msec 2024-12-03T23:28:01,096 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(157): Closed 5876583ce76c99709588d8bf18d29689 2024-12-03T23:28:01,096 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=34 updating hbase:meta row=5876583ce76c99709588d8bf18d29689, regionState=CLOSED 2024-12-03T23:28:01,097 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=33, ppid=32, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=a63704f9ae4639fb39500e4e4c1c926b, UNASSIGN in 194 msec 2024-12-03T23:28:01,098 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=35, ppid=34, state=RUNNABLE, hasLock=false; CloseRegionProcedure 5876583ce76c99709588d8bf18d29689, server=ed8014902436,41121,1733268370525 because future has completed 2024-12-03T23:28:01,101 INFO [PEWorker-4 {}] 
procedure2.ProcedureExecutor(2017): Finished subprocedure pid=35, resume processing ppid=34 2024-12-03T23:28:01,101 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=35, ppid=34, state=SUCCESS, hasLock=false; CloseRegionProcedure 5876583ce76c99709588d8bf18d29689, server=ed8014902436,41121,1733268370525 in 192 msec 2024-12-03T23:28:01,103 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=34, resume processing ppid=32 2024-12-03T23:28:01,103 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=34, ppid=32, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=5876583ce76c99709588d8bf18d29689, UNASSIGN in 199 msec 2024-12-03T23:28:01,105 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=32, resume processing ppid=31 2024-12-03T23:28:01,105 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=32, ppid=31, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testExportFileSystemStateWithSplitRegion in 206 msec 2024-12-03T23:28:01,107 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733268481107"}]},"ts":"1733268481107"} 2024-12-03T23:28:01,108 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportFileSystemStateWithSplitRegion, state=DISABLED in hbase:meta 2024-12-03T23:28:01,108 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testExportFileSystemStateWithSplitRegion to state=DISABLED 2024-12-03T23:28:01,111 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=31, state=SUCCESS, hasLock=false; DisableTableProcedure table=testExportFileSystemStateWithSplitRegion in 241 msec 2024-12-03T23:28:01,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=31 2024-12-03T23:28:01,210 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-12-03T23:28:01,214 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testExportFileSystemStateWithSplitRegion 2024-12-03T23:28:01,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] procedure2.ProcedureExecutor(1139): Stored pid=37, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-03T23:28:01,220 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=37, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-03T23:28:01,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] access.PermissionStorage(261): Removing permissions of removed table testExportFileSystemStateWithSplitRegion 2024-12-03T23:28:01,222 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=37, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-03T23:28:01,227 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37351 {}] access.PermissionStorage(529): No 
permissions found in hbase:acl for acl entry testExportFileSystemStateWithSplitRegion 2024-12-03T23:28:01,231 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportFileSystemStateWithSplitRegion/54136d4ef8de85437ff664f333348f85 2024-12-03T23:28:01,232 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportFileSystemStateWithSplitRegion/a63704f9ae4639fb39500e4e4c1c926b 2024-12-03T23:28:01,232 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportFileSystemStateWithSplitRegion/5876583ce76c99709588d8bf18d29689 2024-12-03T23:28:01,235 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportFileSystemStateWithSplitRegion/a63704f9ae4639fb39500e4e4c1c926b/cf, FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportFileSystemStateWithSplitRegion/a63704f9ae4639fb39500e4e4c1c926b/recovered.edits] 2024-12-03T23:28:01,235 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportFileSystemStateWithSplitRegion/54136d4ef8de85437ff664f333348f85/cf, FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportFileSystemStateWithSplitRegion/54136d4ef8de85437ff664f333348f85/recovered.edits] 2024-12-03T23:28:01,235 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportFileSystemStateWithSplitRegion/5876583ce76c99709588d8bf18d29689/cf, FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportFileSystemStateWithSplitRegion/5876583ce76c99709588d8bf18d29689/recovered.edits] 2024-12-03T23:28:01,279 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportFileSystemStateWithSplitRegion/5876583ce76c99709588d8bf18d29689/cf/463304943af74b0885f87a458aa43cfa_SeqId_4_.54136d4ef8de85437ff664f333348f85 to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testExportFileSystemStateWithSplitRegion/5876583ce76c99709588d8bf18d29689/cf/463304943af74b0885f87a458aa43cfa_SeqId_4_.54136d4ef8de85437ff664f333348f85 2024-12-03T23:28:01,279 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportFileSystemStateWithSplitRegion/54136d4ef8de85437ff664f333348f85/cf/463304943af74b0885f87a458aa43cfa_SeqId_4_ to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testExportFileSystemStateWithSplitRegion/54136d4ef8de85437ff664f333348f85/cf/463304943af74b0885f87a458aa43cfa_SeqId_4_ 2024-12-03T23:28:01,279 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from 
FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportFileSystemStateWithSplitRegion/a63704f9ae4639fb39500e4e4c1c926b/cf/463304943af74b0885f87a458aa43cfa_SeqId_4_.54136d4ef8de85437ff664f333348f85 to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testExportFileSystemStateWithSplitRegion/a63704f9ae4639fb39500e4e4c1c926b/cf/463304943af74b0885f87a458aa43cfa_SeqId_4_.54136d4ef8de85437ff664f333348f85 2024-12-03T23:28:01,283 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportFileSystemStateWithSplitRegion/a63704f9ae4639fb39500e4e4c1c926b/recovered.edits/10.seqid to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testExportFileSystemStateWithSplitRegion/a63704f9ae4639fb39500e4e4c1c926b/recovered.edits/10.seqid 2024-12-03T23:28:01,283 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportFileSystemStateWithSplitRegion/5876583ce76c99709588d8bf18d29689/recovered.edits/10.seqid to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testExportFileSystemStateWithSplitRegion/5876583ce76c99709588d8bf18d29689/recovered.edits/10.seqid 2024-12-03T23:28:01,283 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportFileSystemStateWithSplitRegion/54136d4ef8de85437ff664f333348f85/recovered.edits/6.seqid to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testExportFileSystemStateWithSplitRegion/54136d4ef8de85437ff664f333348f85/recovered.edits/6.seqid 2024-12-03T23:28:01,284 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportFileSystemStateWithSplitRegion/5876583ce76c99709588d8bf18d29689 2024-12-03T23:28:01,287 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportFileSystemStateWithSplitRegion/a63704f9ae4639fb39500e4e4c1c926b 2024-12-03T23:28:01,288 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportFileSystemStateWithSplitRegion/54136d4ef8de85437ff664f333348f85 2024-12-03T23:28:01,288 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testExportFileSystemStateWithSplitRegion regions 2024-12-03T23:28:01,290 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=37, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-03T23:28:01,294 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37351 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-12-03T23:28:01,299 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 3 rows of 
testExportFileSystemStateWithSplitRegion from hbase:meta 2024-12-03T23:28:01,334 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-03T23:28:01,334 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-03T23:28:01,334 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-03T23:28:01,334 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-03T23:28:01,335 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-03T23:28:01,335 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-03T23:28:01,335 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-03T23:28:01,335 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-03T23:28:01,337 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'testExportFileSystemStateWithSplitRegion' descriptor. 2024-12-03T23:28:01,338 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=37, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-03T23:28:01,338 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'testExportFileSystemStateWithSplitRegion' from region states. 
2024-12-03T23:28:01,338 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion,,1733268382171.54136d4ef8de85437ff664f333348f85.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733268481338"}]},"ts":"9223372036854775807"} 2024-12-03T23:28:01,339 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion,,1733268390250.a63704f9ae4639fb39500e4e4c1c926b.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733268481338"}]},"ts":"9223372036854775807"} 2024-12-03T23:28:01,339 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion,5,1733268390250.5876583ce76c99709588d8bf18d29689.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733268481338"}]},"ts":"9223372036854775807"} 2024-12-03T23:28:01,342 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 3 regions from META 2024-12-03T23:28:01,342 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 54136d4ef8de85437ff664f333348f85, NAME => 'testExportFileSystemStateWithSplitRegion,,1733268382171.54136d4ef8de85437ff664f333348f85.', STARTKEY => '', ENDKEY => ''}, {ENCODED => a63704f9ae4639fb39500e4e4c1c926b, NAME => 'testExportFileSystemStateWithSplitRegion,,1733268390250.a63704f9ae4639fb39500e4e4c1c926b.', STARTKEY => '', ENDKEY => '5'}, {ENCODED => 5876583ce76c99709588d8bf18d29689, NAME => 'testExportFileSystemStateWithSplitRegion,5,1733268390250.5876583ce76c99709588d8bf18d29689.', STARTKEY => '5', ENDKEY => ''}] 2024-12-03T23:28:01,342 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'testExportFileSystemStateWithSplitRegion' as deleted. 
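The procedure trail above — DisableTableProcedure pid=31 with its CloseTableRegionsProcedure/TransitRegionStateProcedure children, then DeleteTableProcedure pid=37 archiving the region directories and removing the rows from hbase:meta — is what the master runs when a client disables and then drops the table. As a rough client-side sketch only (not the test harness's code), the equivalent calls through the public Admin API look like this, assuming the cluster is reachable through the default HBase configuration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTableExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            TableName table = TableName.valueOf("testExportFileSystemStateWithSplitRegion");

            try (Connection connection = ConnectionFactory.createConnection(conf);
                 Admin admin = connection.getAdmin()) {
                // A table must be disabled (regions closed, state=DISABLED in hbase:meta)
                // before it can be deleted; that is the DisableTableProcedure /
                // DeleteTableProcedure pair visible in the log above.
                if (admin.tableExists(table)) {
                    if (admin.isTableEnabled(table)) {
                        admin.disableTable(table);
                    }
                    admin.deleteTable(table);
                }
            }
        }
    }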
2024-12-03T23:28:01,342 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733268481342"}]},"ts":"9223372036854775807"} 2024-12-03T23:28:01,344 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table testExportFileSystemStateWithSplitRegion state from META 2024-12-03T23:28:01,346 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=37, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-03T23:28:01,346 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-03T23:28:01,346 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-03T23:28:01,346 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-03T23:28:01,346 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:28:01,346 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:28:01,346 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-03T23:28:01,346 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:28:01,346 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:28:01,348 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=37, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion in 131 msec 2024-12-03T23:28:01,348 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-03T23:28:01,348 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data 
PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-03T23:28:01,348 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-03T23:28:01,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=37 2024-12-03T23:28:01,349 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-03T23:28:01,349 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testExportFileSystemStateWithSplitRegion 2024-12-03T23:28:01,349 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-12-03T23:28:01,350 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithSplitRegion 2024-12-03T23:28:01,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure2.ProcedureExecutor(1139): Stored pid=38, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-03T23:28:01,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=38 2024-12-03T23:28:01,353 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733268481353"}]},"ts":"1733268481353"} 2024-12-03T23:28:01,355 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSplitRegion, state=DISABLING in hbase:meta 2024-12-03T23:28:01,355 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithSplitRegion to state=DISABLING 2024-12-03T23:28:01,357 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=39, ppid=38, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSplitRegion}] 2024-12-03T23:28:01,359 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=0a946bfb1db0f3ada4bf11d8c357faa1, UNASSIGN}, {pid=41, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=e9d01e47291deb97fed4270eaf373804, UNASSIGN}] 2024-12-03T23:28:01,360 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; 
TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=0a946bfb1db0f3ada4bf11d8c357faa1, UNASSIGN 2024-12-03T23:28:01,360 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=41, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=e9d01e47291deb97fed4270eaf373804, UNASSIGN 2024-12-03T23:28:01,361 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=41 updating hbase:meta row=e9d01e47291deb97fed4270eaf373804, regionState=CLOSING, regionLocation=ed8014902436,41121,1733268370525 2024-12-03T23:28:01,361 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=40 updating hbase:meta row=0a946bfb1db0f3ada4bf11d8c357faa1, regionState=CLOSING, regionLocation=ed8014902436,37351,1733268370600 2024-12-03T23:28:01,364 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=41, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=e9d01e47291deb97fed4270eaf373804, UNASSIGN because future has completed 2024-12-03T23:28:01,364 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-03T23:28:01,364 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=42, ppid=41, state=RUNNABLE, hasLock=false; CloseRegionProcedure e9d01e47291deb97fed4270eaf373804, server=ed8014902436,41121,1733268370525}] 2024-12-03T23:28:01,365 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=0a946bfb1db0f3ada4bf11d8c357faa1, UNASSIGN because future has completed 2024-12-03T23:28:01,366 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-03T23:28:01,366 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=43, ppid=40, state=RUNNABLE, hasLock=false; CloseRegionProcedure 0a946bfb1db0f3ada4bf11d8c357faa1, server=ed8014902436,37351,1733268370600}] 2024-12-03T23:28:01,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=38 2024-12-03T23:28:01,518 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] handler.UnassignRegionHandler(122): Close e9d01e47291deb97fed4270eaf373804 2024-12-03T23:28:01,519 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-03T23:28:01,519 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1722): Closing e9d01e47291deb97fed4270eaf373804, disabling compactions & flushes 2024-12-03T23:28:01,519 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSplitRegion,1,1733268379240.e9d01e47291deb97fed4270eaf373804. 
2024-12-03T23:28:01,519 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSplitRegion,1,1733268379240.e9d01e47291deb97fed4270eaf373804. 2024-12-03T23:28:01,519 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSplitRegion,1,1733268379240.e9d01e47291deb97fed4270eaf373804. after waiting 0 ms 2024-12-03T23:28:01,519 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSplitRegion,1,1733268379240.e9d01e47291deb97fed4270eaf373804. 2024-12-03T23:28:01,520 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(122): Close 0a946bfb1db0f3ada4bf11d8c357faa1 2024-12-03T23:28:01,520 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-03T23:28:01,520 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1722): Closing 0a946bfb1db0f3ada4bf11d8c357faa1, disabling compactions & flushes 2024-12-03T23:28:01,520 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSplitRegion,,1733268379240.0a946bfb1db0f3ada4bf11d8c357faa1. 2024-12-03T23:28:01,521 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSplitRegion,,1733268379240.0a946bfb1db0f3ada4bf11d8c357faa1. 2024-12-03T23:28:01,521 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSplitRegion,,1733268379240.0a946bfb1db0f3ada4bf11d8c357faa1. after waiting 0 ms 2024-12-03T23:28:01,521 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSplitRegion,,1733268379240.0a946bfb1db0f3ada4bf11d8c357faa1. 2024-12-03T23:28:01,527 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithSplitRegion/e9d01e47291deb97fed4270eaf373804/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-03T23:28:01,528 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-03T23:28:01,528 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSplitRegion,1,1733268379240.e9d01e47291deb97fed4270eaf373804. 
2024-12-03T23:28:01,528 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1676): Region close journal for e9d01e47291deb97fed4270eaf373804: Waiting for close lock at 1733268481519Running coprocessor pre-close hooks at 1733268481519Disabling compacts and flushes for region at 1733268481519Disabling writes for close at 1733268481519Writing region close event to WAL at 1733268481521 (+2 ms)Running coprocessor post-close hooks at 1733268481528 (+7 ms)Closed at 1733268481528 2024-12-03T23:28:01,529 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithSplitRegion/0a946bfb1db0f3ada4bf11d8c357faa1/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-03T23:28:01,530 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-03T23:28:01,530 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSplitRegion,,1733268379240.0a946bfb1db0f3ada4bf11d8c357faa1. 2024-12-03T23:28:01,530 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1676): Region close journal for 0a946bfb1db0f3ada4bf11d8c357faa1: Waiting for close lock at 1733268481520Running coprocessor pre-close hooks at 1733268481520Disabling compacts and flushes for region at 1733268481520Disabling writes for close at 1733268481521 (+1 ms)Writing region close event to WAL at 1733268481522 (+1 ms)Running coprocessor post-close hooks at 1733268481529 (+7 ms)Closed at 1733268481530 (+1 ms) 2024-12-03T23:28:01,531 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] handler.UnassignRegionHandler(157): Closed e9d01e47291deb97fed4270eaf373804 2024-12-03T23:28:01,532 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=41 updating hbase:meta row=e9d01e47291deb97fed4270eaf373804, regionState=CLOSED 2024-12-03T23:28:01,532 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(157): Closed 0a946bfb1db0f3ada4bf11d8c357faa1 2024-12-03T23:28:01,533 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=40 updating hbase:meta row=0a946bfb1db0f3ada4bf11d8c357faa1, regionState=CLOSED 2024-12-03T23:28:01,533 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=42, ppid=41, state=RUNNABLE, hasLock=false; CloseRegionProcedure e9d01e47291deb97fed4270eaf373804, server=ed8014902436,41121,1733268370525 because future has completed 2024-12-03T23:28:01,534 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=43, ppid=40, state=RUNNABLE, hasLock=false; CloseRegionProcedure 0a946bfb1db0f3ada4bf11d8c357faa1, server=ed8014902436,37351,1733268370600 because future has completed 2024-12-03T23:28:01,537 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=42, resume processing ppid=41 2024-12-03T23:28:01,538 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=43, resume 
processing ppid=40 2024-12-03T23:28:01,538 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=43, ppid=40, state=SUCCESS, hasLock=false; CloseRegionProcedure 0a946bfb1db0f3ada4bf11d8c357faa1, server=ed8014902436,37351,1733268370600 in 169 msec 2024-12-03T23:28:01,538 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=42, ppid=41, state=SUCCESS, hasLock=false; CloseRegionProcedure e9d01e47291deb97fed4270eaf373804, server=ed8014902436,41121,1733268370525 in 171 msec 2024-12-03T23:28:01,538 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=41, ppid=39, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=e9d01e47291deb97fed4270eaf373804, UNASSIGN in 178 msec 2024-12-03T23:28:01,539 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=40, resume processing ppid=39 2024-12-03T23:28:01,539 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=40, ppid=39, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=0a946bfb1db0f3ada4bf11d8c357faa1, UNASSIGN in 179 msec 2024-12-03T23:28:01,542 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=39, resume processing ppid=38 2024-12-03T23:28:01,542 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=39, ppid=38, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSplitRegion in 183 msec 2024-12-03T23:28:01,544 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733268481544"}]},"ts":"1733268481544"} 2024-12-03T23:28:01,546 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSplitRegion, state=DISABLED in hbase:meta 2024-12-03T23:28:01,546 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithSplitRegion to state=DISABLED 2024-12-03T23:28:01,548 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=38, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion in 196 msec 2024-12-03T23:28:01,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=38 2024-12-03T23:28:01,669 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-12-03T23:28:01,670 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithSplitRegion 2024-12-03T23:28:01,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure2.ProcedureExecutor(1139): Stored pid=44, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-03T23:28:01,672 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=44, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-03T23:28:01,672 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithSplitRegion 2024-12-03T23:28:01,673 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=44, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-03T23:28:01,675 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37351 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithSplitRegion 2024-12-03T23:28:01,677 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithSplitRegion/0a946bfb1db0f3ada4bf11d8c357faa1 2024-12-03T23:28:01,677 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithSplitRegion/e9d01e47291deb97fed4270eaf373804 2024-12-03T23:28:01,680 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithSplitRegion/0a946bfb1db0f3ada4bf11d8c357faa1/cf, FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithSplitRegion/0a946bfb1db0f3ada4bf11d8c357faa1/recovered.edits] 2024-12-03T23:28:01,680 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithSplitRegion/e9d01e47291deb97fed4270eaf373804/cf, FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithSplitRegion/e9d01e47291deb97fed4270eaf373804/recovered.edits] 2024-12-03T23:28:01,685 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithSplitRegion/0a946bfb1db0f3ada4bf11d8c357faa1/cf/1f2ad2900ad74e5789a2b55f43d13c40 to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/0a946bfb1db0f3ada4bf11d8c357faa1/cf/1f2ad2900ad74e5789a2b55f43d13c40 2024-12-03T23:28:01,685 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithSplitRegion/e9d01e47291deb97fed4270eaf373804/cf/715f1938f9d149deb99fb394b71c2aa9 to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/e9d01e47291deb97fed4270eaf373804/cf/715f1938f9d149deb99fb394b71c2aa9 2024-12-03T23:28:01,689 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithSplitRegion/0a946bfb1db0f3ada4bf11d8c357faa1/recovered.edits/9.seqid to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/0a946bfb1db0f3ada4bf11d8c357faa1/recovered.edits/9.seqid 2024-12-03T23:28:01,689 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithSplitRegion/e9d01e47291deb97fed4270eaf373804/recovered.edits/9.seqid to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/e9d01e47291deb97fed4270eaf373804/recovered.edits/9.seqid 2024-12-03T23:28:01,689 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithSplitRegion/0a946bfb1db0f3ada4bf11d8c357faa1 2024-12-03T23:28:01,690 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithSplitRegion/e9d01e47291deb97fed4270eaf373804 2024-12-03T23:28:01,690 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithSplitRegion regions 2024-12-03T23:28:01,690 DEBUG [PEWorker-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c 2024-12-03T23:28:01,691 DEBUG [PEWorker-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf] 2024-12-03T23:28:01,694 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/c4ca4238a0b923820dcc509a6f75849b20241203b5e8291b56ab4e09807f9ed2e82a0334_e9d01e47291deb97fed4270eaf373804 to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/c4ca4238a0b923820dcc509a6f75849b20241203b5e8291b56ab4e09807f9ed2e82a0334_e9d01e47291deb97fed4270eaf373804 2024-12-03T23:28:01,705 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/d41d8cd98f00b204e9800998ecf8427e202412036221d7bc2230479398f093bd58318b33_0a946bfb1db0f3ada4bf11d8c357faa1 to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/d41d8cd98f00b204e9800998ecf8427e202412036221d7bc2230479398f093bd58318b33_0a946bfb1db0f3ada4bf11d8c357faa1 
2024-12-03T23:28:01,705 DEBUG [PEWorker-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c 2024-12-03T23:28:01,708 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=44, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-03T23:28:01,712 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithSplitRegion from hbase:meta 2024-12-03T23:28:01,718 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-03T23:28:01,718 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-03T23:28:01,718 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-03T23:28:01,718 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-03T23:28:01,719 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-03T23:28:01,719 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-03T23:28:01,720 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-03T23:28:01,720 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-03T23:28:01,720 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithSplitRegion' descriptor. 2024-12-03T23:28:01,721 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=44, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-03T23:28:01,722 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithSplitRegion' from region states. 
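A few entries further on, MasterRpcServices(838) records the deletion of the three snapshots created during the test (emptySnaptb0-, snapshot-, and snaptb0-testExportFileSystemStateWithSplitRegion). The matching client call is Admin.deleteSnapshot; a minimal sketch, again only illustrative, taking an open Admin handle such as the one from the previous example and using the snapshot names copied from the log:

    // Illustrative only: drop the snapshots named in the log, given an open Admin.
    static void deleteTestSnapshots(org.apache.hadoop.hbase.client.Admin admin) throws java.io.IOException {
        String[] snapshots = {
            "emptySnaptb0-testExportFileSystemStateWithSplitRegion",
            "snapshot-testExportFileSystemStateWithSplitRegion",
            "snaptb0-testExportFileSystemStateWithSplitRegion"
        };
        for (String name : snapshots) {
            // Removes the snapshot metadata; HFiles still referenced elsewhere are left
            // for the archive cleanup chores to reclaim.
            admin.deleteSnapshot(name);
        }
    }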
2024-12-03T23:28:01,722 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion,,1733268379240.0a946bfb1db0f3ada4bf11d8c357faa1.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733268481722"}]},"ts":"9223372036854775807"} 2024-12-03T23:28:01,722 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion,1,1733268379240.e9d01e47291deb97fed4270eaf373804.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733268481722"}]},"ts":"9223372036854775807"} 2024-12-03T23:28:01,725 INFO [PEWorker-4 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-03T23:28:01,725 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 0a946bfb1db0f3ada4bf11d8c357faa1, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,,1733268379240.0a946bfb1db0f3ada4bf11d8c357faa1.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => e9d01e47291deb97fed4270eaf373804, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,1,1733268379240.e9d01e47291deb97fed4270eaf373804.', STARTKEY => '1', ENDKEY => ''}] 2024-12-03T23:28:01,725 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithSplitRegion' as deleted. 2024-12-03T23:28:01,725 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733268481725"}]},"ts":"9223372036854775807"} 2024-12-03T23:28:01,727 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithSplitRegion state from META 2024-12-03T23:28:01,728 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=44, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-03T23:28:01,729 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-03T23:28:01,729 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-03T23:28:01,729 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-03T23:28:01,729 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:28:01,729 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:28:01,729 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:28:01,729 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-03T23:28:01,729 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:28:01,730 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=44, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion in 58 msec 2024-12-03T23:28:01,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=44 2024-12-03T23:28:01,731 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithSplitRegion 2024-12-03T23:28:01,731 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-12-03T23:28:01,766 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithSplitRegion" type: DISABLED 2024-12-03T23:28:01,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-03T23:28:01,773 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snapshot-testExportFileSystemStateWithSplitRegion" type: DISABLED 2024-12-03T23:28:01,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotManager(381): Deleting snapshot: snapshot-testExportFileSystemStateWithSplitRegion 2024-12-03T23:28:01,779 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithSplitRegion" type: DISABLED 2024-12-03T23:28:01,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-03T23:28:01,877 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithSplitRegion Thread=759 (was 717) Potentially hanging thread: Thread-1404 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) 
java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-4-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-13 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-15 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Container metrics unregistration java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: DeletionService #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-919726820_22 at /127.0.0.1:50794 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/ed8014902436:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-919726820_22 at /127.0.0.1:33192 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33981 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially 
hanging thread: RPCClient-NioEventLoopGroup-6-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1277598849_1 at /127.0.0.1:50778 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/ed8014902436:0-0 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/ed8014902436:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/ed8014902436:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (790252428) connection to localhost/127.0.0.1:33981 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 125946) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-919726820_22 at /127.0.0.1:40800 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1277598849_1 at /127.0.0.1:40778 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-4-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #0 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.read1(BufferedReader.java:213) java.base@17.0.11/java.io.BufferedReader.read(BufferedReader.java:287) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.parseExecResult(Shell.java:1295) app//org.apache.hadoop.util.Shell.runCommand(Shell.java:1054) app//org.apache.hadoop.util.Shell.run(Shell.java:959) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:1282) app//org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor.launchContainer(DefaultContainerExecutor.java:349) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.launchContainer(ContainerLaunch.java:600) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:388) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:105) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=805 (was 778) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=444 (was 220) - SystemLoadAverage LEAK? -, ProcessCount=19 (was 11) - ProcessCount LEAK? 
-, AvailableMemoryMB=605 (was 4638) 2024-12-03T23:28:01,879 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=759 is superior to 500 2024-12-03T23:28:01,898 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportWithTargetName Thread=759, OpenFileDescriptor=805, MaxFileDescriptor=1048576, SystemLoadAverage=444, ProcessCount=19, AvailableMemoryMB=605 2024-12-03T23:28:01,899 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=759 is superior to 500 2024-12-03T23:28:01,900 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-03T23:28:01,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure2.ProcedureExecutor(1139): Stored pid=45, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithTargetName 2024-12-03T23:28:01,903 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_PRE_OPERATION 2024-12-03T23:28:01,903 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithTargetName" procId is: 45 2024-12-03T23:28:01,904 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-03T23:28:01,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=45 2024-12-03T23:28:01,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741911_1087 (size=442) 2024-12-03T23:28:01,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741911_1087 (size=442) 2024-12-03T23:28:01,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741911_1087 (size=442) 2024-12-03T23:28:01,918 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 69a8c10beb49ca5b02b44d5c1e779b0b, NAME => 'testtb-testExportWithTargetName,,1733268481900.69a8c10beb49ca5b02b44d5c1e779b0b.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', 
REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b 2024-12-03T23:28:01,919 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 250294dc7a876a930683b1f0a3a4ae1b, NAME => 'testtb-testExportWithTargetName,1,1733268481900.250294dc7a876a930683b1f0a3a4ae1b.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b 2024-12-03T23:28:01,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741912_1088 (size=67) 2024-12-03T23:28:01,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741912_1088 (size=67) 2024-12-03T23:28:01,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741913_1089 (size=67) 2024-12-03T23:28:01,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741912_1088 (size=67) 2024-12-03T23:28:01,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741913_1089 (size=67) 2024-12-03T23:28:01,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741913_1089 (size=67) 2024-12-03T23:28:01,935 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,1,1733268481900.250294dc7a876a930683b1f0a3a4ae1b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T23:28:01,935 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,,1733268481900.69a8c10beb49ca5b02b44d5c1e779b0b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T23:28:01,935 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1722): Closing 250294dc7a876a930683b1f0a3a4ae1b, disabling compactions & flushes 2024-12-03T23:28:01,935 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1722): Closing 69a8c10beb49ca5b02b44d5c1e779b0b, disabling compactions & flushes 2024-12-03T23:28:01,935 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,,1733268481900.69a8c10beb49ca5b02b44d5c1e779b0b. 
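
[Editor's note] The "Potentially hanging thread" blocks earlier in this excerpt are produced by the test-side resource checker when the live-thread count after a test case exceeds the count recorded before it. The following is a minimal, generic sketch (plain JDK APIs only, not HBase's actual ResourceChecker code; class and method names are illustrative) of how such a per-thread stack dump can be produced so that leftover pool threads can be spotted by name:

    import java.util.Map;

    // Minimal sketch (not HBase's ResourceChecker): dump all live threads and their
    // stacks so that executor pools still running after a test can be identified.
    public final class ThreadLeakDump {
        public static void dump() {
            Map<Thread, StackTraceElement[]> all = Thread.getAllStackTraces();
            System.out.println("Live threads: " + all.size());
            for (Map.Entry<Thread, StackTraceElement[]> e : all.entrySet()) {
                System.out.println("Potentially hanging thread: " + e.getKey().getName());
                for (StackTraceElement frame : e.getValue()) {
                    System.out.println("    " + frame);
                }
            }
        }

        public static void main(String[] args) {
            dump();
        }
    }
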
2024-12-03T23:28:01,935 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,1,1733268481900.250294dc7a876a930683b1f0a3a4ae1b. 2024-12-03T23:28:01,935 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,,1733268481900.69a8c10beb49ca5b02b44d5c1e779b0b. 2024-12-03T23:28:01,935 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,1,1733268481900.250294dc7a876a930683b1f0a3a4ae1b. 2024-12-03T23:28:01,935 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,1,1733268481900.250294dc7a876a930683b1f0a3a4ae1b. after waiting 0 ms 2024-12-03T23:28:01,935 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,,1733268481900.69a8c10beb49ca5b02b44d5c1e779b0b. after waiting 0 ms 2024-12-03T23:28:01,935 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,1,1733268481900.250294dc7a876a930683b1f0a3a4ae1b. 2024-12-03T23:28:01,935 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,,1733268481900.69a8c10beb49ca5b02b44d5c1e779b0b. 2024-12-03T23:28:01,935 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,1,1733268481900.250294dc7a876a930683b1f0a3a4ae1b. 2024-12-03T23:28:01,935 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,,1733268481900.69a8c10beb49ca5b02b44d5c1e779b0b. 
2024-12-03T23:28:01,935 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1676): Region close journal for 69a8c10beb49ca5b02b44d5c1e779b0b: Waiting for close lock at 1733268481935Disabling compacts and flushes for region at 1733268481935Disabling writes for close at 1733268481935Writing region close event to WAL at 1733268481935Closed at 1733268481935 2024-12-03T23:28:01,935 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1676): Region close journal for 250294dc7a876a930683b1f0a3a4ae1b: Waiting for close lock at 1733268481935Disabling compacts and flushes for region at 1733268481935Disabling writes for close at 1733268481935Writing region close event to WAL at 1733268481935Closed at 1733268481935 2024-12-03T23:28:01,937 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_ADD_TO_META 2024-12-03T23:28:01,937 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithTargetName,,1733268481900.69a8c10beb49ca5b02b44d5c1e779b0b.","families":{"info":[{"qualifier":"regioninfo","vlen":66,"tag":[],"timestamp":"1733268481937"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733268481937"}]},"ts":"1733268481937"} 2024-12-03T23:28:01,937 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithTargetName,1,1733268481900.250294dc7a876a930683b1f0a3a4ae1b.","families":{"info":[{"qualifier":"regioninfo","vlen":66,"tag":[],"timestamp":"1733268481937"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733268481937"}]},"ts":"1733268481937"} 2024-12-03T23:28:01,940 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
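
[Editor's note] The create-table entries above show pid=45 building 'testtb-testExportWithTargetName' with a single MOB-enabled family 'cf' (MOB_THRESHOLD => '0', VERSIONS => '1') and two regions split at '1'. As a hedged client-side illustration only (the ZooKeeper quorum and connection setup are placeholders; this is not the test's own setup code), an equivalent table could be created through the public Admin API like this:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class CreateMobTableExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            conf.set("hbase.zookeeper.quorum", "localhost"); // placeholder quorum
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                TableName name = TableName.valueOf("testtb-testExportWithTargetName");
                // Single family 'cf', MOB enabled with threshold 0 and one version,
                // mirroring the descriptor printed by HMaster above.
                TableDescriptorBuilder table = TableDescriptorBuilder.newBuilder(name)
                    .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                        .setMobEnabled(true)
                        .setMobThreshold(0L)
                        .setMaxVersions(1)
                        .build());
                // One split key '1' yields the two regions ('', '1') and ('1', '') seen in the log.
                admin.createTable(table.build(), new byte[][] { Bytes.toBytes("1") });
            }
        }
    }
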
2024-12-03T23:28:01,941 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-03T23:28:01,941 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733268481941"}]},"ts":"1733268481941"} 2024-12-03T23:28:01,943 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=ENABLING in hbase:meta 2024-12-03T23:28:01,943 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {ed8014902436=0} racks are {/default-rack=0} 2024-12-03T23:28:01,945 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-03T23:28:01,945 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-03T23:28:01,945 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-03T23:28:01,945 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-03T23:28:01,945 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-03T23:28:01,945 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-03T23:28:01,945 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-03T23:28:01,945 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-03T23:28:01,945 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-03T23:28:01,945 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-03T23:28:01,945 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=69a8c10beb49ca5b02b44d5c1e779b0b, ASSIGN}, {pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=250294dc7a876a930683b1f0a3a4ae1b, ASSIGN}] 2024-12-03T23:28:01,946 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=250294dc7a876a930683b1f0a3a4ae1b, ASSIGN 2024-12-03T23:28:01,946 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=69a8c10beb49ca5b02b44d5c1e779b0b, ASSIGN 2024-12-03T23:28:01,947 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=69a8c10beb49ca5b02b44d5c1e779b0b, ASSIGN; state=OFFLINE, location=ed8014902436,41121,1733268370525; forceNewPlan=false, retain=false 2024-12-03T23:28:01,947 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=47, ppid=45, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=250294dc7a876a930683b1f0a3a4ae1b, ASSIGN; state=OFFLINE, location=ed8014902436,39085,1733268370401; forceNewPlan=false, retain=false 2024-12-03T23:28:02,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=45 2024-12-03T23:28:02,098 INFO [ed8014902436:43753 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-03T23:28:02,099 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=47 updating hbase:meta row=250294dc7a876a930683b1f0a3a4ae1b, regionState=OPENING, regionLocation=ed8014902436,39085,1733268370401 2024-12-03T23:28:02,099 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=46 updating hbase:meta row=69a8c10beb49ca5b02b44d5c1e779b0b, regionState=OPENING, regionLocation=ed8014902436,41121,1733268370525 2024-12-03T23:28:02,103 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=250294dc7a876a930683b1f0a3a4ae1b, ASSIGN because future has completed 2024-12-03T23:28:02,103 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=48, ppid=47, state=RUNNABLE, hasLock=false; OpenRegionProcedure 250294dc7a876a930683b1f0a3a4ae1b, server=ed8014902436,39085,1733268370401}] 2024-12-03T23:28:02,104 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=69a8c10beb49ca5b02b44d5c1e779b0b, ASSIGN because future has completed 2024-12-03T23:28:02,105 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=49, ppid=46, state=RUNNABLE, hasLock=false; OpenRegionProcedure 69a8c10beb49ca5b02b44d5c1e779b0b, server=ed8014902436,41121,1733268370525}] 2024-12-03T23:28:02,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=45 2024-12-03T23:28:02,257 DEBUG [RSProcedureDispatcher-pool-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-03T23:28:02,259 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50415, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-03T23:28:02,262 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] handler.AssignRegionHandler(132): Open testtb-testExportWithTargetName,,1733268481900.69a8c10beb49ca5b02b44d5c1e779b0b. 
2024-12-03T23:28:02,263 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(7752): Opening region: {ENCODED => 69a8c10beb49ca5b02b44d5c1e779b0b, NAME => 'testtb-testExportWithTargetName,,1733268481900.69a8c10beb49ca5b02b44d5c1e779b0b.', STARTKEY => '', ENDKEY => '1'} 2024-12-03T23:28:02,263 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithTargetName,,1733268481900.69a8c10beb49ca5b02b44d5c1e779b0b. service=AccessControlService 2024-12-03T23:28:02,263 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-03T23:28:02,264 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithTargetName 69a8c10beb49ca5b02b44d5c1e779b0b 2024-12-03T23:28:02,264 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,,1733268481900.69a8c10beb49ca5b02b44d5c1e779b0b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T23:28:02,264 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(7794): checking encryption for 69a8c10beb49ca5b02b44d5c1e779b0b 2024-12-03T23:28:02,264 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(7797): checking classloading for 69a8c10beb49ca5b02b44d5c1e779b0b 2024-12-03T23:28:02,265 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] handler.AssignRegionHandler(132): Open testtb-testExportWithTargetName,1,1733268481900.250294dc7a876a930683b1f0a3a4ae1b. 2024-12-03T23:28:02,265 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7752): Opening region: {ENCODED => 250294dc7a876a930683b1f0a3a4ae1b, NAME => 'testtb-testExportWithTargetName,1,1733268481900.250294dc7a876a930683b1f0a3a4ae1b.', STARTKEY => '1', ENDKEY => ''} 2024-12-03T23:28:02,265 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithTargetName,1,1733268481900.250294dc7a876a930683b1f0a3a4ae1b. service=AccessControlService 2024-12-03T23:28:02,265 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
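
[Editor's note] The "System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded" entries above come from the secure mini-cluster configuration this test runs under. As a rough sketch only, using the standard AccessController configuration keys from the HBase security documentation (assumed here, not read from this cluster's generated hbase-site.xml), the coprocessor is typically wired in like this:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public final class AccessControllerConfigSketch {
        // Assumed standard wiring for the AccessController coprocessor; this is an
        // illustration, not this test's actual configuration.
        public static Configuration secureConf() {
            Configuration conf = HBaseConfiguration.create();
            String ac = "org.apache.hadoop.hbase.security.access.AccessController";
            conf.set("hbase.coprocessor.master.classes", ac);
            conf.set("hbase.coprocessor.region.classes", ac);
            conf.set("hbase.coprocessor.regionserver.classes", ac);
            conf.setBoolean("hbase.security.authorization", true);
            return conf;
        }
    }
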
2024-12-03T23:28:02,265 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithTargetName 250294dc7a876a930683b1f0a3a4ae1b 2024-12-03T23:28:02,265 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,1,1733268481900.250294dc7a876a930683b1f0a3a4ae1b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T23:28:02,266 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7794): checking encryption for 250294dc7a876a930683b1f0a3a4ae1b 2024-12-03T23:28:02,266 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7797): checking classloading for 250294dc7a876a930683b1f0a3a4ae1b 2024-12-03T23:28:02,266 INFO [StoreOpener-69a8c10beb49ca5b02b44d5c1e779b0b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 69a8c10beb49ca5b02b44d5c1e779b0b 2024-12-03T23:28:02,267 INFO [StoreOpener-250294dc7a876a930683b1f0a3a4ae1b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 250294dc7a876a930683b1f0a3a4ae1b 2024-12-03T23:28:02,269 INFO [StoreOpener-69a8c10beb49ca5b02b44d5c1e779b0b-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 69a8c10beb49ca5b02b44d5c1e779b0b columnFamilyName cf 2024-12-03T23:28:02,269 INFO [StoreOpener-250294dc7a876a930683b1f0a3a4ae1b-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 250294dc7a876a930683b1f0a3a4ae1b columnFamilyName cf 2024-12-03T23:28:02,270 DEBUG [StoreOpener-69a8c10beb49ca5b02b44d5c1e779b0b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:28:02,270 DEBUG [StoreOpener-250294dc7a876a930683b1f0a3a4ae1b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:28:02,270 INFO [StoreOpener-250294dc7a876a930683b1f0a3a4ae1b-1 {}] regionserver.HStore(327): Store=250294dc7a876a930683b1f0a3a4ae1b/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T23:28:02,270 INFO [StoreOpener-69a8c10beb49ca5b02b44d5c1e779b0b-1 {}] regionserver.HStore(327): Store=69a8c10beb49ca5b02b44d5c1e779b0b/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T23:28:02,271 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1038): replaying wal for 69a8c10beb49ca5b02b44d5c1e779b0b 2024-12-03T23:28:02,271 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1038): replaying wal for 250294dc7a876a930683b1f0a3a4ae1b 2024-12-03T23:28:02,271 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithTargetName/250294dc7a876a930683b1f0a3a4ae1b 2024-12-03T23:28:02,271 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithTargetName/69a8c10beb49ca5b02b44d5c1e779b0b 2024-12-03T23:28:02,272 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithTargetName/69a8c10beb49ca5b02b44d5c1e779b0b 2024-12-03T23:28:02,272 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithTargetName/250294dc7a876a930683b1f0a3a4ae1b 2024-12-03T23:28:02,272 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1048): stopping wal replay for 250294dc7a876a930683b1f0a3a4ae1b 2024-12-03T23:28:02,272 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1048): stopping wal replay for 69a8c10beb49ca5b02b44d5c1e779b0b 2024-12-03T23:28:02,272 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1060): Cleaning up temporary data for 250294dc7a876a930683b1f0a3a4ae1b 2024-12-03T23:28:02,272 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1060): Cleaning up temporary data for 69a8c10beb49ca5b02b44d5c1e779b0b 2024-12-03T23:28:02,274 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 
{event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1093): writing seq id for 69a8c10beb49ca5b02b44d5c1e779b0b 2024-12-03T23:28:02,274 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1093): writing seq id for 250294dc7a876a930683b1f0a3a4ae1b 2024-12-03T23:28:02,276 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithTargetName/69a8c10beb49ca5b02b44d5c1e779b0b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T23:28:02,276 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1114): Opened 69a8c10beb49ca5b02b44d5c1e779b0b; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59914165, jitterRate=-0.10720936954021454}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-03T23:28:02,277 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 69a8c10beb49ca5b02b44d5c1e779b0b 2024-12-03T23:28:02,277 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1006): Region open journal for 69a8c10beb49ca5b02b44d5c1e779b0b: Running coprocessor pre-open hook at 1733268482264Writing region info on filesystem at 1733268482264Initializing all the Stores at 1733268482265 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733268482265Cleaning up temporary data from old regions at 1733268482272 (+7 ms)Running coprocessor post-open hooks at 1733268482277 (+5 ms)Region opened successfully at 1733268482277 2024-12-03T23:28:02,278 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithTargetName/250294dc7a876a930683b1f0a3a4ae1b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T23:28:02,278 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1114): Opened 250294dc7a876a930683b1f0a3a4ae1b; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73112976, jitterRate=0.08946824073791504}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-03T23:28:02,278 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithTargetName,,1733268481900.69a8c10beb49ca5b02b44d5c1e779b0b., pid=49, masterSystemTime=1733268482258 2024-12-03T23:28:02,279 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 
250294dc7a876a930683b1f0a3a4ae1b 2024-12-03T23:28:02,279 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1006): Region open journal for 250294dc7a876a930683b1f0a3a4ae1b: Running coprocessor pre-open hook at 1733268482266Writing region info on filesystem at 1733268482266Initializing all the Stores at 1733268482267 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733268482267Cleaning up temporary data from old regions at 1733268482272 (+5 ms)Running coprocessor post-open hooks at 1733268482279 (+7 ms)Region opened successfully at 1733268482279 2024-12-03T23:28:02,281 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithTargetName,,1733268481900.69a8c10beb49ca5b02b44d5c1e779b0b. 2024-12-03T23:28:02,281 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] handler.AssignRegionHandler(153): Opened testtb-testExportWithTargetName,,1733268481900.69a8c10beb49ca5b02b44d5c1e779b0b. 2024-12-03T23:28:02,281 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithTargetName,1,1733268481900.250294dc7a876a930683b1f0a3a4ae1b., pid=48, masterSystemTime=1733268482256 2024-12-03T23:28:02,281 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=46 updating hbase:meta row=69a8c10beb49ca5b02b44d5c1e779b0b, regionState=OPEN, openSeqNum=2, regionLocation=ed8014902436,41121,1733268370525 2024-12-03T23:28:02,283 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithTargetName,1,1733268481900.250294dc7a876a930683b1f0a3a4ae1b. 2024-12-03T23:28:02,284 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=49, ppid=46, state=RUNNABLE, hasLock=false; OpenRegionProcedure 69a8c10beb49ca5b02b44d5c1e779b0b, server=ed8014902436,41121,1733268370525 because future has completed 2024-12-03T23:28:02,284 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] handler.AssignRegionHandler(153): Opened testtb-testExportWithTargetName,1,1733268481900.250294dc7a876a930683b1f0a3a4ae1b. 
2024-12-03T23:28:02,284 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=47 updating hbase:meta row=250294dc7a876a930683b1f0a3a4ae1b, regionState=OPEN, openSeqNum=2, regionLocation=ed8014902436,39085,1733268370401 2024-12-03T23:28:02,287 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=48, ppid=47, state=RUNNABLE, hasLock=false; OpenRegionProcedure 250294dc7a876a930683b1f0a3a4ae1b, server=ed8014902436,39085,1733268370401 because future has completed 2024-12-03T23:28:02,289 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=49, resume processing ppid=46 2024-12-03T23:28:02,290 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=49, ppid=46, state=SUCCESS, hasLock=false; OpenRegionProcedure 69a8c10beb49ca5b02b44d5c1e779b0b, server=ed8014902436,41121,1733268370525 in 180 msec 2024-12-03T23:28:02,293 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=48, resume processing ppid=47 2024-12-03T23:28:02,293 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=46, ppid=45, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=69a8c10beb49ca5b02b44d5c1e779b0b, ASSIGN in 346 msec 2024-12-03T23:28:02,293 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=48, ppid=47, state=SUCCESS, hasLock=false; OpenRegionProcedure 250294dc7a876a930683b1f0a3a4ae1b, server=ed8014902436,39085,1733268370401 in 186 msec 2024-12-03T23:28:02,297 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=47, resume processing ppid=45 2024-12-03T23:28:02,297 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=47, ppid=45, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=250294dc7a876a930683b1f0a3a4ae1b, ASSIGN in 348 msec 2024-12-03T23:28:02,298 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-03T23:28:02,298 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733268482298"}]},"ts":"1733268482298"} 2024-12-03T23:28:02,301 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=ENABLED in hbase:meta 2024-12-03T23:28:02,302 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_POST_OPERATION 2024-12-03T23:28:02,302 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithTargetName jenkins: RWXCA 2024-12-03T23:28:02,306 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37351 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-12-03T23:28:02,334 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:28:02,334 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:28:02,334 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:28:02,334 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:28:02,346 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-03T23:28:02,346 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-03T23:28:02,346 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-03T23:28:02,347 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-03T23:28:02,348 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=45, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithTargetName in 445 msec 2024-12-03T23:28:02,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=45 2024-12-03T23:28:02,530 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithTargetName completed 2024-12-03T23:28:02,530 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-03T23:28:02,533 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithTargetName 2024-12-03T23:28:02,533 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithTargetName,,1733268481900.69a8c10beb49ca5b02b44d5c1e779b0b. 
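
[Editor's note] "Found 2 regions for table testtb-testExportWithTargetName" and the firstRegionName entry above come from the test utility scanning hbase:meta once the create procedure finished. A client can obtain the same view through RegionLocator; a minimal sketch follows ('conn' is assumed to be an already-open Connection, and the class name is illustrative):

    import java.util.List;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public final class ListRegionsSketch {
        // Sketch: list the regions of the freshly created table, roughly what the
        // test utility derives from its META scan.
        static void printRegions(Connection conn) throws Exception {
            TableName name = TableName.valueOf("testtb-testExportWithTargetName");
            try (RegionLocator locator = conn.getRegionLocator(name)) {
                List<HRegionLocation> locations = locator.getAllRegionLocations();
                System.out.println("Found " + locations.size() + " regions for table " + name);
                for (HRegionLocation loc : locations) {
                    System.out.println(loc.getRegion().getRegionNameAsString()
                        + " on " + loc.getServerName());
                }
            }
        }
    }
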
2024-12-03T23:28:02,534 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-03T23:28:02,536 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-03T23:28:02,542 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-03T23:28:02,547 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T23:28:02,549 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34042, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T23:28:02,551 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-03T23:28:02,554 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-03T23:28:02,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733268482554 (current time:1733268482554). 2024-12-03T23:28:02,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-03T23:28:02,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithTargetName VERSION not specified, setting to 2 2024-12-03T23:28:02,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-03T23:28:02,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4ee0f373, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:28:02,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.ClusterIdFetcher(90): Going to request ed8014902436,43753,-1 for getting cluster id 2024-12-03T23:28:02,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T23:28:02,556 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '365bf15c-9cbe-4838-a767-ac7274dcdbe4' 2024-12-03T23:28:02,557 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T23:28:02,557 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: 
cluster_id: "365bf15c-9cbe-4838-a767-ac7274dcdbe4" 2024-12-03T23:28:02,557 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5d0f8a2a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:28:02,557 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [ed8014902436,43753,-1] 2024-12-03T23:28:02,557 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T23:28:02,557 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:28:02,558 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44128, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T23:28:02,559 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@305593de, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:28:02,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T23:28:02,560 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ed8014902436,37351,1733268370600, seqNum=-1] 2024-12-03T23:28:02,561 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T23:28:02,562 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46664, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T23:28:02,563 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753. 
2024-12-03T23:28:02,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-03T23:28:02,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:28:02,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:28:02,563 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-03T23:28:02,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3294e308, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:28:02,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.ClusterIdFetcher(90): Going to request ed8014902436,43753,-1 for getting cluster id 2024-12-03T23:28:02,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T23:28:02,565 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '365bf15c-9cbe-4838-a767-ac7274dcdbe4' 2024-12-03T23:28:02,565 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T23:28:02,565 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "365bf15c-9cbe-4838-a767-ac7274dcdbe4" 2024-12-03T23:28:02,566 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@64c8f42d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:28:02,566 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [ed8014902436,43753,-1] 2024-12-03T23:28:02,566 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T23:28:02,566 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:28:02,567 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44158, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T23:28:02,567 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@304802c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:28:02,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T23:28:02,569 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ed8014902436,37351,1733268370600, seqNum=-1] 2024-12-03T23:28:02,569 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T23:28:02,570 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46676, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T23:28:02,573 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithTargetName', locateType=CURRENT is [region=hbase:acl,,1733268373530.94ece278d222af9bae70064026dc24ae., hostname=ed8014902436,37351,1733268370600, seqNum=2] 2024-12-03T23:28:02,575 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753. 
2024-12-03T23:28:02,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-03T23:28:02,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:28:02,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:28:02,575 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-03T23:28:02,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-12-03T23:28:02,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
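
[Editor's note] The entries above show MasterRpcServices validating the FLUSH snapshot 'emptySnaptb0-testExportWithTargetName' and registering it as SnapshotProcedure pid=50. From the client side that is a single Admin call; a hedged sketch (again assuming an open Connection; not the test's own code):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;

    public final class SnapshotSketch {
        // Sketch: request the same FLUSH-type snapshot the master registers above.
        static void takeSnapshot(Connection conn) throws Exception {
            try (Admin admin = conn.getAdmin()) {
                admin.snapshot("emptySnaptb0-testExportWithTargetName",
                    TableName.valueOf("testtb-testExportWithTargetName"));
                // The test later copies a snapshot of this table elsewhere under a
                // different target name via the ExportSnapshot tool; that export run
                // is outside this excerpt.
            }
        }
    }
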
2024-12-03T23:28:02,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure2.ProcedureExecutor(1139): Stored pid=50, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-03T23:28:02,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 50 2024-12-03T23:28:02,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=50 2024-12-03T23:28:02,579 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-03T23:28:02,580 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-03T23:28:02,584 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-03T23:28:02,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741914_1090 (size=167) 2024-12-03T23:28:02,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741914_1090 (size=167) 2024-12-03T23:28:02,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741914_1090 (size=167) 2024-12-03T23:28:02,597 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-03T23:28:02,597 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=51, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 69a8c10beb49ca5b02b44d5c1e779b0b}, {pid=52, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 250294dc7a876a930683b1f0a3a4ae1b}] 2024-12-03T23:28:02,598 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=51, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 69a8c10beb49ca5b02b44d5c1e779b0b 2024-12-03T23:28:02,599 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=52, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 250294dc7a876a930683b1f0a3a4ae1b 2024-12-03T23:28:02,690 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=50 2024-12-03T23:28:02,752 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41121 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=51 2024-12-03T23:28:02,752 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39085 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=52 2024-12-03T23:28:02,753 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,,1733268481900.69a8c10beb49ca5b02b44d5c1e779b0b. 2024-12-03T23:28:02,753 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,1,1733268481900.250294dc7a876a930683b1f0a3a4ae1b. 2024-12-03T23:28:02,754 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.HRegion(2603): Flush status journal for 69a8c10beb49ca5b02b44d5c1e779b0b: 2024-12-03T23:28:02,754 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,,1733268481900.69a8c10beb49ca5b02b44d5c1e779b0b. for emptySnaptb0-testExportWithTargetName completed. 2024-12-03T23:28:02,754 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.HRegion(2603): Flush status journal for 250294dc7a876a930683b1f0a3a4ae1b: 2024-12-03T23:28:02,754 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,1,1733268481900.250294dc7a876a930683b1f0a3a4ae1b. for emptySnaptb0-testExportWithTargetName completed. 2024-12-03T23:28:02,754 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,,1733268481900.69a8c10beb49ca5b02b44d5c1e779b0b.' region-info for snapshot=emptySnaptb0-testExportWithTargetName 2024-12-03T23:28:02,754 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-03T23:28:02,755 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,1,1733268481900.250294dc7a876a930683b1f0a3a4ae1b.' 
region-info for snapshot=emptySnaptb0-testExportWithTargetName 2024-12-03T23:28:02,755 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-03T23:28:02,755 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-03T23:28:02,755 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-03T23:28:02,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741915_1091 (size=70) 2024-12-03T23:28:02,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741915_1091 (size=70) 2024-12-03T23:28:02,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741916_1092 (size=70) 2024-12-03T23:28:02,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741915_1091 (size=70) 2024-12-03T23:28:02,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741916_1092 (size=70) 2024-12-03T23:28:02,771 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,1,1733268481900.250294dc7a876a930683b1f0a3a4ae1b. 2024-12-03T23:28:02,771 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=52 2024-12-03T23:28:02,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741916_1092 (size=70) 2024-12-03T23:28:02,771 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,,1733268481900.69a8c10beb49ca5b02b44d5c1e779b0b. 
2024-12-03T23:28:02,772 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=51 2024-12-03T23:28:02,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.HMaster(4169): Remote procedure done, pid=52 2024-12-03T23:28:02,772 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithTargetName on region 250294dc7a876a930683b1f0a3a4ae1b 2024-12-03T23:28:02,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.HMaster(4169): Remote procedure done, pid=51 2024-12-03T23:28:02,772 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithTargetName on region 69a8c10beb49ca5b02b44d5c1e779b0b 2024-12-03T23:28:02,773 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=51, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 69a8c10beb49ca5b02b44d5c1e779b0b 2024-12-03T23:28:02,773 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=52, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 250294dc7a876a930683b1f0a3a4ae1b 2024-12-03T23:28:02,778 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=52, ppid=50, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 250294dc7a876a930683b1f0a3a4ae1b in 178 msec 2024-12-03T23:28:02,780 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=51, resume processing ppid=50 2024-12-03T23:28:02,780 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=51, ppid=50, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 69a8c10beb49ca5b02b44d5c1e779b0b in 178 msec 2024-12-03T23:28:02,780 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-03T23:28:02,782 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-03T23:28:02,787 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-03T23:28:02,787 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-03T23:28:02,787 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:28:02,788 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-03T23:28:02,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741917_1093 (size=62) 2024-12-03T23:28:02,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741917_1093 (size=62) 2024-12-03T23:28:02,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741917_1093 (size=62) 2024-12-03T23:28:02,796 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-03T23:28:02,796 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithTargetName 2024-12-03T23:28:02,797 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithTargetName 2024-12-03T23:28:02,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741918_1094 (size=649) 2024-12-03T23:28:02,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741918_1094 (size=649) 2024-12-03T23:28:02,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741918_1094 (size=649) 2024-12-03T23:28:02,815 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-03T23:28:02,819 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-03T23:28:02,820 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithTargetName to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/emptySnaptb0-testExportWithTargetName 2024-12-03T23:28:02,821 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=50, 
state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-03T23:28:02,822 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 50 2024-12-03T23:28:02,823 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=50, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } in 246 msec 2024-12-03T23:28:02,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=50 2024-12-03T23:28:02,900 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithTargetName completed 2024-12-03T23:28:02,907 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41121 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithTargetName,,1733268481900.69a8c10beb49ca5b02b44d5c1e779b0b. with WAL disabled. Data may be lost in the event of a crash. 2024-12-03T23:28:02,909 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39085 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithTargetName,1,1733268481900.250294dc7a876a930683b1f0a3a4ae1b. with WAL disabled. Data may be lost in the event of a crash. 2024-12-03T23:28:02,911 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-03T23:28:02,914 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithTargetName 2024-12-03T23:28:02,914 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithTargetName,,1733268481900.69a8c10beb49ca5b02b44d5c1e779b0b. 
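[Note: the two HRegion(8528) warnings above ("writing data to region ... with WAL disabled. Data may be lost in the event of a crash.") come from the test loading rows with write-ahead logging skipped for the puts. A sketch of such a write, assuming an already-open Connection named conn; the row key and value are made up, only the table, family (cf) and qualifier (q) names come from the log:]

```java
import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPutSketch {
  static void putWithoutWal(Connection conn) throws IOException {
    try (Table table = conn.getTable(TableName.valueOf("testtb-testExportWithTargetName"))) {
      Put put = new Put(Bytes.toBytes("row-0001"));           // hypothetical row key
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"),  // family/qualifier as in the log
          Bytes.toBytes("value-0001"));                       // hypothetical value
      put.setDurability(Durability.SKIP_WAL); // server logs "writing data ... with WAL disabled"
      table.put(put);
    }
  }
}
```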
2024-12-03T23:28:02,914 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-03T23:28:02,916 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-03T23:28:02,921 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-03T23:28:02,927 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-03T23:28:02,931 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-03T23:28:02,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733268482931 (current time:1733268482931). 2024-12-03T23:28:02,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-03T23:28:02,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithTargetName VERSION not specified, setting to 2 2024-12-03T23:28:02,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-03T23:28:02,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7eb7d733, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:28:02,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.ClusterIdFetcher(90): Going to request ed8014902436,43753,-1 for getting cluster id 2024-12-03T23:28:02,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T23:28:02,932 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '365bf15c-9cbe-4838-a767-ac7274dcdbe4' 2024-12-03T23:28:02,933 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T23:28:02,933 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "365bf15c-9cbe-4838-a767-ac7274dcdbe4" 2024-12-03T23:28:02,933 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@60a3b57f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind 
address=null 2024-12-03T23:28:02,933 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [ed8014902436,43753,-1] 2024-12-03T23:28:02,934 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T23:28:02,934 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:28:02,935 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44162, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T23:28:02,935 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@cb9d8f6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:28:02,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T23:28:02,937 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ed8014902436,37351,1733268370600, seqNum=-1] 2024-12-03T23:28:02,937 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T23:28:02,938 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46680, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T23:28:02,939 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753. 
2024-12-03T23:28:02,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353)
    at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-12-03T23:28:02,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-03T23:28:02,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-03T23:28:02,940 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited.
2024-12-03T23:28:02,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@71a4330b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-03T23:28:02,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.ClusterIdFetcher(90): Going to request ed8014902436,43753,-1 for getting cluster id
2024-12-03T23:28:02,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false
2024-12-03T23:28:02,942 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '365bf15c-9cbe-4838-a767-ac7274dcdbe4'
2024-12-03T23:28:02,942 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse
2024-12-03T23:28:02,942 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "365bf15c-9cbe-4838-a767-ac7274dcdbe4"
2024-12-03T23:28:02,942 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@481c290e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-03T23:28:02,942 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to
use new servers to create stubs: [ed8014902436,43753,-1] 2024-12-03T23:28:02,943 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T23:28:02,943 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:28:02,943 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44180, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T23:28:02,944 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4f473805, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:28:02,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T23:28:02,946 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ed8014902436,37351,1733268370600, seqNum=-1] 2024-12-03T23:28:02,947 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T23:28:02,948 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46686, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T23:28:02,950 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithTargetName', locateType=CURRENT is [region=hbase:acl,,1733268373530.94ece278d222af9bae70064026dc24ae., hostname=ed8014902436,37351,1733268370600, seqNum=2] 2024-12-03T23:28:02,951 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753. 
2024-12-03T23:28:02,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134)
    at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522)
    at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555)
    at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39)
    at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354)
    at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-12-03T23:28:02,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-03T23:28:02,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-03T23:28:02,952 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited.
2024-12-03T23:28:02,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA]
2024-12-03T23:28:02,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot...
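[Note: this second request (snaptb0-testExportWithTargetName) goes through the same ACL copy and becomes SnapshotProcedure pid=53 just below; the MasterRpcServices(1377) "Checking to see if procedure is done" lines that follow are again the client polling for completion. A caller that wants to overlap other work with the snapshot can use the asynchronous Admin variant instead of the blocking one; a sketch assuming the HBase 2.x+ snapshotAsync/SnapshotDescription client API, with the names taken from the log:]

```java
import java.util.concurrent.Future;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class AsyncSnapshotSketch {
  // 'admin' is an open Admin handle (see the earlier snapshot sketch).
  static void snapshotInBackground(Admin admin) throws Exception {
    SnapshotDescription desc = new SnapshotDescription(
        "snaptb0-testExportWithTargetName",                    // ss= in the log
        TableName.valueOf("testtb-testExportWithTargetName"),  // table= in the log
        SnapshotType.FLUSH);                                   // type=FLUSH in the log
    Future<Void> done = admin.snapshotAsync(desc); // master stores the SnapshotProcedure
    // ... other work while the region servers flush and snapshot their regions ...
    done.get(); // returns once the procedure (pid=53 in this run) reaches SUCCESS
  }
}
```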
2024-12-03T23:28:02,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure2.ProcedureExecutor(1139): Stored pid=53, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-03T23:28:02,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 53 2024-12-03T23:28:02,955 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-03T23:28:02,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=53 2024-12-03T23:28:02,956 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-03T23:28:02,959 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-03T23:28:02,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741919_1095 (size=162) 2024-12-03T23:28:02,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741919_1095 (size=162) 2024-12-03T23:28:02,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741919_1095 (size=162) 2024-12-03T23:28:02,969 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-03T23:28:02,969 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=54, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 69a8c10beb49ca5b02b44d5c1e779b0b}, {pid=55, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 250294dc7a876a930683b1f0a3a4ae1b}] 2024-12-03T23:28:02,970 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=54, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 69a8c10beb49ca5b02b44d5c1e779b0b 2024-12-03T23:28:02,970 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=55, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 250294dc7a876a930683b1f0a3a4ae1b 2024-12-03T23:28:03,060 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=53 2024-12-03T23:28:03,122 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41121 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=54 2024-12-03T23:28:03,122 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39085 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=55 2024-12-03T23:28:03,122 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,,1733268481900.69a8c10beb49ca5b02b44d5c1e779b0b. 2024-12-03T23:28:03,123 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HRegion(2902): Flushing 69a8c10beb49ca5b02b44d5c1e779b0b 1/1 column families, dataSize=333 B heapSize=976 B 2024-12-03T23:28:03,123 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,1,1733268481900.250294dc7a876a930683b1f0a3a4ae1b. 2024-12-03T23:28:03,123 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HRegion(2902): Flushing 250294dc7a876a930683b1f0a3a4ae1b 1/1 column families, dataSize=2.93 KB heapSize=6.58 KB 2024-12-03T23:28:03,147 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203b80bab0b3b8d4e18b82b940422611cfc_69a8c10beb49ca5b02b44d5c1e779b0b is 71, key is 0609e46b2b5343535d8a35e1125405ca/cf:q/1733268482907/Put/seqid=0 2024-12-03T23:28:03,149 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202412033c624b07ba6645cebb6e81c73b42bc02_250294dc7a876a930683b1f0a3a4ae1b is 71, key is 18fc7e8d62249ec095da2be98a29a077/cf:q/1733268482909/Put/seqid=0 2024-12-03T23:28:03,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741920_1096 (size=5242) 2024-12-03T23:28:03,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741921_1097 (size=8031) 2024-12-03T23:28:03,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741920_1096 (size=5242) 2024-12-03T23:28:03,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741920_1096 (size=5242) 2024-12-03T23:28:03,161 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:28:03,161 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:28:03,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741921_1097 (size=8031) 2024-12-03T23:28:03,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741921_1097 (size=8031) 2024-12-03T23:28:03,167 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203b80bab0b3b8d4e18b82b940422611cfc_69a8c10beb49ca5b02b44d5c1e779b0b to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/d41d8cd98f00b204e9800998ecf8427e20241203b80bab0b3b8d4e18b82b940422611cfc_69a8c10beb49ca5b02b44d5c1e779b0b 2024-12-03T23:28:03,168 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithTargetName/69a8c10beb49ca5b02b44d5c1e779b0b/.tmp/cf/1dfd80d56f694d5da8e504ef56c93a6e, store: [table=testtb-testExportWithTargetName family=cf region=69a8c10beb49ca5b02b44d5c1e779b0b] 2024-12-03T23:28:03,169 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithTargetName/69a8c10beb49ca5b02b44d5c1e779b0b/.tmp/cf/1dfd80d56f694d5da8e504ef56c93a6e is 208, key is 0231654e87da3031215b68749396896d0/cf:q/1733268482907/Put/seqid=0 2024-12-03T23:28:03,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741922_1098 (size=6322) 2024-12-03T23:28:03,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741922_1098 (size=6322) 2024-12-03T23:28:03,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741922_1098 (size=6322) 2024-12-03T23:28:03,175 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=333, hasBloomFilter=true, into tmp file hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithTargetName/69a8c10beb49ca5b02b44d5c1e779b0b/.tmp/cf/1dfd80d56f694d5da8e504ef56c93a6e 2024-12-03T23:28:03,182 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithTargetName/69a8c10beb49ca5b02b44d5c1e779b0b/.tmp/cf/1dfd80d56f694d5da8e504ef56c93a6e as hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithTargetName/69a8c10beb49ca5b02b44d5c1e779b0b/cf/1dfd80d56f694d5da8e504ef56c93a6e 2024-12-03T23:28:03,188 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202412033c624b07ba6645cebb6e81c73b42bc02_250294dc7a876a930683b1f0a3a4ae1b to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/c4ca4238a0b923820dcc509a6f75849b202412033c624b07ba6645cebb6e81c73b42bc02_250294dc7a876a930683b1f0a3a4ae1b 2024-12-03T23:28:03,188 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithTargetName/69a8c10beb49ca5b02b44d5c1e779b0b/cf/1dfd80d56f694d5da8e504ef56c93a6e, entries=5, sequenceid=6, filesize=6.2 K 2024-12-03T23:28:03,189 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HRegion(3140): Finished flush of dataSize ~333 B/333, heapSize ~960 B/960, currentSize=0 B/0 for 69a8c10beb49ca5b02b44d5c1e779b0b in 67ms, sequenceid=6, compaction requested=false 2024-12-03T23:28:03,189 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithTargetName' 2024-12-03T23:28:03,189 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithTargetName/250294dc7a876a930683b1f0a3a4ae1b/.tmp/cf/a0e4af9ee5884d9ca30a0133ade43bab, store: [table=testtb-testExportWithTargetName family=cf region=250294dc7a876a930683b1f0a3a4ae1b] 2024-12-03T23:28:03,189 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HRegion(2603): Flush status journal for 69a8c10beb49ca5b02b44d5c1e779b0b: 2024-12-03T23:28:03,190 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,,1733268481900.69a8c10beb49ca5b02b44d5c1e779b0b. for snaptb0-testExportWithTargetName completed. 2024-12-03T23:28:03,190 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,,1733268481900.69a8c10beb49ca5b02b44d5c1e779b0b.' 
region-info for snapshot=snaptb0-testExportWithTargetName 2024-12-03T23:28:03,190 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-03T23:28:03,190 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithTargetName/69a8c10beb49ca5b02b44d5c1e779b0b/cf/1dfd80d56f694d5da8e504ef56c93a6e] hfiles 2024-12-03T23:28:03,190 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithTargetName/69a8c10beb49ca5b02b44d5c1e779b0b/cf/1dfd80d56f694d5da8e504ef56c93a6e for snapshot=snaptb0-testExportWithTargetName 2024-12-03T23:28:03,190 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithTargetName/250294dc7a876a930683b1f0a3a4ae1b/.tmp/cf/a0e4af9ee5884d9ca30a0133ade43bab is 208, key is 1ed2fa7a5a1921d85e467d8aa56c19f41/cf:q/1733268482909/Put/seqid=0 2024-12-03T23:28:03,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741923_1099 (size=14541) 2024-12-03T23:28:03,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741923_1099 (size=14541) 2024-12-03T23:28:03,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741924_1100 (size=109) 2024-12-03T23:28:03,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741924_1100 (size=109) 2024-12-03T23:28:03,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741924_1100 (size=109) 2024-12-03T23:28:03,198 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,,1733268481900.69a8c10beb49ca5b02b44d5c1e779b0b. 
2024-12-03T23:28:03,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741923_1099 (size=14541) 2024-12-03T23:28:03,199 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=54 2024-12-03T23:28:03,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.HMaster(4169): Remote procedure done, pid=54 2024-12-03T23:28:03,199 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=2.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithTargetName/250294dc7a876a930683b1f0a3a4ae1b/.tmp/cf/a0e4af9ee5884d9ca30a0133ade43bab 2024-12-03T23:28:03,199 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithTargetName on region 69a8c10beb49ca5b02b44d5c1e779b0b 2024-12-03T23:28:03,199 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=54, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 69a8c10beb49ca5b02b44d5c1e779b0b 2024-12-03T23:28:03,202 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=54, ppid=53, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 69a8c10beb49ca5b02b44d5c1e779b0b in 231 msec 2024-12-03T23:28:03,207 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithTargetName/250294dc7a876a930683b1f0a3a4ae1b/.tmp/cf/a0e4af9ee5884d9ca30a0133ade43bab as hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithTargetName/250294dc7a876a930683b1f0a3a4ae1b/cf/a0e4af9ee5884d9ca30a0133ade43bab 2024-12-03T23:28:03,214 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithTargetName/250294dc7a876a930683b1f0a3a4ae1b/cf/a0e4af9ee5884d9ca30a0133ade43bab, entries=45, sequenceid=6, filesize=14.2 K 2024-12-03T23:28:03,215 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HRegion(3140): Finished flush of dataSize ~2.93 KB/3003, heapSize ~6.56 KB/6720, currentSize=0 B/0 for 250294dc7a876a930683b1f0a3a4ae1b in 92ms, sequenceid=6, compaction requested=false 2024-12-03T23:28:03,215 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HRegion(2603): Flush status journal for 250294dc7a876a930683b1f0a3a4ae1b: 2024-12-03T23:28:03,215 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,1,1733268481900.250294dc7a876a930683b1f0a3a4ae1b. for snaptb0-testExportWithTargetName completed. 
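[Note: unlike the first, empty snapshot, this one is taken after rows were written, so the type=FLUSH snapshot first flushes each region: MOB cells go to the mobdir files renamed above and the remaining cells to the per-region cf hfiles that are then committed and referenced in the manifest. The same flush can also be requested explicitly outside a snapshot; a minimal sketch reusing an open Admin handle:]

```java
import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public class FlushTableSketch {
  // Forces every region of the table to write its memstore out as hfiles,
  // the same kind of files (1dfd80d5..., a0e4af9e...) committed in the log above.
  static void flushTable(Admin admin) throws IOException {
    admin.flush(TableName.valueOf("testtb-testExportWithTargetName"));
  }
}
```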
2024-12-03T23:28:03,216 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,1,1733268481900.250294dc7a876a930683b1f0a3a4ae1b.' region-info for snapshot=snaptb0-testExportWithTargetName 2024-12-03T23:28:03,216 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-03T23:28:03,216 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithTargetName/250294dc7a876a930683b1f0a3a4ae1b/cf/a0e4af9ee5884d9ca30a0133ade43bab] hfiles 2024-12-03T23:28:03,216 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithTargetName/250294dc7a876a930683b1f0a3a4ae1b/cf/a0e4af9ee5884d9ca30a0133ade43bab for snapshot=snaptb0-testExportWithTargetName 2024-12-03T23:28:03,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741925_1101 (size=109) 2024-12-03T23:28:03,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741925_1101 (size=109) 2024-12-03T23:28:03,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741925_1101 (size=109) 2024-12-03T23:28:03,223 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,1,1733268481900.250294dc7a876a930683b1f0a3a4ae1b. 
2024-12-03T23:28:03,223 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=55 2024-12-03T23:28:03,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.HMaster(4169): Remote procedure done, pid=55 2024-12-03T23:28:03,224 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithTargetName on region 250294dc7a876a930683b1f0a3a4ae1b 2024-12-03T23:28:03,224 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=55, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 250294dc7a876a930683b1f0a3a4ae1b 2024-12-03T23:28:03,226 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=55, resume processing ppid=53 2024-12-03T23:28:03,227 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=55, ppid=53, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 250294dc7a876a930683b1f0a3a4ae1b in 256 msec 2024-12-03T23:28:03,227 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-03T23:28:03,227 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-03T23:28:03,228 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
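[Note: once the MOB region references are consolidated and pid=53 completes a few lines below, the test picks an HDFS destination under export-test/export-1733268483580 and runs ExportSnapshot against it; the tool verifies the source snapshot, copies its manifest, and then sets up a MapReduce copy job (the TableMapReduceUtil jar-resolution lines). Run standalone, the equivalent export looks roughly like the sketch below; the option names are the long-standing ExportSnapshot flags but should be checked against your version's --help, and the mapper count is an assumption:]

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    int exitCode = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportWithTargetName",
        "-target", "testExportWithTargetName",   // rename the snapshot at the destination, as in this test
        "-copy-to", "hdfs://localhost:37331/user/jenkins/test-data/"
            + "5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268483580",
        "-mappers", "1"                          // assumed parallelism
    });
    System.exit(exitCode);
  }
}
```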
2024-12-03T23:28:03,228 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-03T23:28:03,228 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:28:03,230 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/c4ca4238a0b923820dcc509a6f75849b202412033c624b07ba6645cebb6e81c73b42bc02_250294dc7a876a930683b1f0a3a4ae1b, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/d41d8cd98f00b204e9800998ecf8427e20241203b80bab0b3b8d4e18b82b940422611cfc_69a8c10beb49ca5b02b44d5c1e779b0b] hfiles 2024-12-03T23:28:03,231 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/c4ca4238a0b923820dcc509a6f75849b202412033c624b07ba6645cebb6e81c73b42bc02_250294dc7a876a930683b1f0a3a4ae1b 2024-12-03T23:28:03,231 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/d41d8cd98f00b204e9800998ecf8427e20241203b80bab0b3b8d4e18b82b940422611cfc_69a8c10beb49ca5b02b44d5c1e779b0b 2024-12-03T23:28:03,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741926_1102 (size=293) 2024-12-03T23:28:03,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741926_1102 (size=293) 2024-12-03T23:28:03,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741926_1102 (size=293) 2024-12-03T23:28:03,247 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-03T23:28:03,247 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithTargetName 2024-12-03T23:28:03,248 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/.tmp/snaptb0-testExportWithTargetName 2024-12-03T23:28:03,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741927_1103 (size=959) 2024-12-03T23:28:03,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741927_1103 (size=959) 2024-12-03T23:28:03,261 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741927_1103 (size=959) 2024-12-03T23:28:03,264 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-03T23:28:03,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=53 2024-12-03T23:28:03,270 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-03T23:28:03,271 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/.tmp/snaptb0-testExportWithTargetName to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/snaptb0-testExportWithTargetName 2024-12-03T23:28:03,272 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-03T23:28:03,272 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 53 2024-12-03T23:28:03,273 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=53, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } in 319 msec 2024-12-03T23:28:03,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=53 2024-12-03T23:28:03,580 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithTargetName completed 2024-12-03T23:28:03,580 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268483580 2024-12-03T23:28:03,581 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:37331, tgtDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268483580, rawTgtDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268483580, srcFsUri=hdfs://localhost:37331, srcDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b 2024-12-03T23:28:03,608 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:37331, 
inputRoot=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b 2024-12-03T23:28:03,608 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-919726820_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268483580, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268483580/.hbase-snapshot/.tmp/testExportWithTargetName 2024-12-03T23:28:03,610 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-03T23:28:03,615 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/snaptb0-testExportWithTargetName to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268483580/.hbase-snapshot/.tmp/testExportWithTargetName 2024-12-03T23:28:03,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741928_1104 (size=162) 2024-12-03T23:28:03,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741929_1105 (size=959) 2024-12-03T23:28:03,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741928_1104 (size=162) 2024-12-03T23:28:03,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741929_1105 (size=959) 2024-12-03T23:28:03,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741928_1104 (size=162) 2024-12-03T23:28:03,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741929_1105 (size=959) 2024-12-03T23:28:03,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741930_1106 (size=154) 2024-12-03T23:28:03,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741930_1106 (size=154) 2024-12-03T23:28:03,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741930_1106 (size=154) 2024-12-03T23:28:03,644 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:28:03,644 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:28:03,645 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:28:04,628 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/hadoop-16663429089027102665.jar 2024-12-03T23:28:04,628 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:28:04,629 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:28:04,690 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/hadoop-17484431338835113704.jar 2024-12-03T23:28:04,691 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:28:04,691 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:28:04,691 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:28:04,691 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:28:04,691 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:28:04,692 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:28:04,692 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-03T23:28:04,692 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-03T23:28:04,692 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-03T23:28:04,692 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-03T23:28:04,692 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-03T23:28:04,693 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-03T23:28:04,693 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-03T23:28:04,693 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-03T23:28:04,693 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-03T23:28:04,693 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-03T23:28:04,694 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-03T23:28:04,694 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-03T23:28:04,694 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-03T23:28:04,694 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-03T23:28:04,694 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-03T23:28:04,695 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-03T23:28:04,695 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-03T23:28:04,695 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-03T23:28:04,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741931_1107 (size=131440) 2024-12-03T23:28:04,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741931_1107 (size=131440) 2024-12-03T23:28:04,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741931_1107 (size=131440) 2024-12-03T23:28:04,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741932_1108 (size=4188619) 2024-12-03T23:28:04,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741932_1108 (size=4188619) 2024-12-03T23:28:04,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741932_1108 (size=4188619) 2024-12-03T23:28:04,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741933_1109 (size=1323991) 2024-12-03T23:28:04,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741933_1109 (size=1323991) 2024-12-03T23:28:04,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741933_1109 (size=1323991) 2024-12-03T23:28:04,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741934_1110 (size=903924) 2024-12-03T23:28:04,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741934_1110 (size=903924) 2024-12-03T23:28:04,802 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741934_1110 (size=903924) 2024-12-03T23:28:04,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741935_1111 (size=8360360) 2024-12-03T23:28:04,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741935_1111 (size=8360360) 2024-12-03T23:28:04,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741935_1111 (size=8360360) 2024-12-03T23:28:04,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741936_1112 (size=1877034) 2024-12-03T23:28:04,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741936_1112 (size=1877034) 2024-12-03T23:28:04,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741936_1112 (size=1877034) 2024-12-03T23:28:04,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741937_1113 (size=77835) 2024-12-03T23:28:04,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741937_1113 (size=77835) 2024-12-03T23:28:04,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741937_1113 (size=77835) 2024-12-03T23:28:04,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741938_1114 (size=6424741) 2024-12-03T23:28:04,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741938_1114 (size=6424741) 2024-12-03T23:28:04,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741938_1114 (size=6424741) 2024-12-03T23:28:04,899 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0001_000001 (auth:SIMPLE) from 127.0.0.1:44586 2024-12-03T23:28:04,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741939_1115 (size=30949) 2024-12-03T23:28:04,910 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_1/usercache/jenkins/appcache/application_1733268377251_0001/container_1733268377251_0001_01_000001/launch_container.sh] 2024-12-03T23:28:04,910 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_1/usercache/jenkins/appcache/application_1733268377251_0001/container_1733268377251_0001_01_000001/container_tokens] 2024-12-03T23:28:04,910 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741939_1115 (size=30949) 2024-12-03T23:28:04,910 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_1/usercache/jenkins/appcache/application_1733268377251_0001/container_1733268377251_0001_01_000001/sysfs] 2024-12-03T23:28:04,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741939_1115 (size=30949) 2024-12-03T23:28:04,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741940_1116 (size=1597213) 2024-12-03T23:28:04,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741940_1116 (size=1597213) 2024-12-03T23:28:04,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741940_1116 (size=1597213) 2024-12-03T23:28:04,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741941_1117 (size=443172) 2024-12-03T23:28:04,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741941_1117 (size=443172) 2024-12-03T23:28:04,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741941_1117 (size=443172) 2024-12-03T23:28:04,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741942_1118 (size=4695811) 2024-12-03T23:28:04,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741942_1118 (size=4695811) 2024-12-03T23:28:04,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741942_1118 (size=4695811) 2024-12-03T23:28:04,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741943_1119 (size=232957) 2024-12-03T23:28:04,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741943_1119 (size=232957) 2024-12-03T23:28:04,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741943_1119 (size=232957) 2024-12-03T23:28:04,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741944_1120 (size=127628) 2024-12-03T23:28:04,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741944_1120 (size=127628) 2024-12-03T23:28:04,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741944_1120 (size=127628) 2024-12-03T23:28:04,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741945_1121 
(size=20406) 2024-12-03T23:28:04,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741945_1121 (size=20406) 2024-12-03T23:28:04,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741945_1121 (size=20406) 2024-12-03T23:28:04,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741946_1122 (size=5175431) 2024-12-03T23:28:04,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741946_1122 (size=5175431) 2024-12-03T23:28:05,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741946_1122 (size=5175431) 2024-12-03T23:28:05,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741947_1123 (size=217634) 2024-12-03T23:28:05,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741947_1123 (size=217634) 2024-12-03T23:28:05,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741947_1123 (size=217634) 2024-12-03T23:28:05,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741948_1124 (size=1832290) 2024-12-03T23:28:05,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741948_1124 (size=1832290) 2024-12-03T23:28:05,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741948_1124 (size=1832290) 2024-12-03T23:28:05,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741949_1125 (size=322274) 2024-12-03T23:28:05,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741949_1125 (size=322274) 2024-12-03T23:28:05,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741949_1125 (size=322274) 2024-12-03T23:28:05,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741950_1126 (size=503880) 2024-12-03T23:28:05,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741950_1126 (size=503880) 2024-12-03T23:28:05,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741950_1126 (size=503880) 2024-12-03T23:28:05,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741951_1127 (size=29229) 2024-12-03T23:28:05,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741951_1127 (size=29229) 2024-12-03T23:28:05,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to 
blk_1073741951_1127 (size=29229) 2024-12-03T23:28:05,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741952_1128 (size=24096) 2024-12-03T23:28:05,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741952_1128 (size=24096) 2024-12-03T23:28:05,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741952_1128 (size=24096) 2024-12-03T23:28:05,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741953_1129 (size=111872) 2024-12-03T23:28:05,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741953_1129 (size=111872) 2024-12-03T23:28:05,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741953_1129 (size=111872) 2024-12-03T23:28:05,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741954_1130 (size=45609) 2024-12-03T23:28:05,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741954_1130 (size=45609) 2024-12-03T23:28:05,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741954_1130 (size=45609) 2024-12-03T23:28:05,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741955_1131 (size=136454) 2024-12-03T23:28:05,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741955_1131 (size=136454) 2024-12-03T23:28:05,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741955_1131 (size=136454) 2024-12-03T23:28:05,140 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
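The long run of TableMapReduceUtil(972) records above is the export job working out which jars have to ship with it before submission. A rough sketch of how an HBase MapReduce job normally triggers that same resolution; the job name and configuration here are placeholders, not taken from the test:

```java
// Sketch only: letting TableMapReduceUtil resolve HBase dependency jars for a job,
// which produces the "For class X, using jar Y" records seen above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class DependencyJarsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "export-snapshot-sketch");  // placeholder job name
    // Locates the jars containing classes such as HConstants and ClientProtos and
    // registers them with the job's distributed cache.
    TableMapReduceUtil.addDependencyJars(job);
  }
}
```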
2024-12-03T23:28:05,143 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportWithTargetName' hfile list 2024-12-03T23:28:05,146 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=14.2 K 2024-12-03T23:28:05,146 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=7.8 K 2024-12-03T23:28:05,146 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=6.2 K 2024-12-03T23:28:05,146 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=5.1 K 2024-12-03T23:28:05,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741956_1132 (size=1031) 2024-12-03T23:28:05,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741956_1132 (size=1031) 2024-12-03T23:28:05,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741956_1132 (size=1031) 2024-12-03T23:28:05,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741957_1133 (size=35) 2024-12-03T23:28:05,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741957_1133 (size=35) 2024-12-03T23:28:05,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741957_1133 (size=35) 2024-12-03T23:28:05,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741958_1134 (size=304003) 2024-12-03T23:28:05,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741958_1134 (size=304003) 2024-12-03T23:28:05,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741958_1134 (size=304003) 2024-12-03T23:28:05,638 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-03T23:28:05,638 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-03T23:28:05,901 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0002_000001 (auth:SIMPLE) from 127.0.0.1:44590 2024-12-03T23:28:06,616 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-03T23:28:08,447 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
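The export itself (four splits, destination export-1733268483580, target name testExportWithTargetName) is run by the ExportSnapshot tool. A hedged sketch of invoking it programmatically: the paths and names are copied from the log, while the option spellings follow the tool's documented long options and are not verified against this exact build:

```java
// Sketch only: running the ExportSnapshot tool roughly as the test harness does above.
// Option names are assumptions based on the tool's documented usage.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "--snapshot", "snaptb0-testExportWithTargetName",
        // Rename the snapshot at the destination, as in testExportWithTargetName.
        "--target", "testExportWithTargetName",
        "--copy-to", "hdfs://localhost:37331/user/jenkins/test-data/"
            + "5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268483580"
    });
    System.exit(rc);
  }
}
```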
2024-12-03T23:28:09,918 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName 2024-12-03T23:28:09,918 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName Metrics about Tables on a single HBase RegionServer 2024-12-03T23:28:09,918 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportFileSystemStateWithSplitRegion 2024-12-03T23:28:09,919 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSplitRegion 2024-12-03T23:28:11,533 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0002_000001 (auth:SIMPLE) from 127.0.0.1:34700 2024-12-03T23:28:11,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741959_1135 (size=349701) 2024-12-03T23:28:11,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741959_1135 (size=349701) 2024-12-03T23:28:11,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741959_1135 (size=349701) 2024-12-03T23:28:12,467 WARN [regionserver/ed8014902436:0.Chore.1 {}] hbase.ExecutorStatusChore(69): RS_COMPACTED_FILES_DISCHARGER's size info, queued: 4, running: 1 2024-12-03T23:28:13,828 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0002_000001 (auth:SIMPLE) from 127.0.0.1:43878 2024-12-03T23:28:13,828 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0002_000001 (auth:SIMPLE) from 127.0.0.1:40240 2024-12-03T23:28:14,638 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0002_000001 (auth:SIMPLE) from 127.0.0.1:43890 2024-12-03T23:28:14,639 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0002_000001 (auth:SIMPLE) from 127.0.0.1:40248 2024-12-03T23:28:15,422 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-03T23:28:15,971 DEBUG [master/ed8014902436:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 250294dc7a876a930683b1f0a3a4ae1b changed from -1.0 to 0.0, refreshing cache 2024-12-03T23:28:15,971 DEBUG [master/ed8014902436:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 69a8c10beb49ca5b02b44d5c1e779b0b changed from -1.0 to 0.0, refreshing cache 2024-12-03T23:28:16,996 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733268377251_0002_01_000006 while processing FINISH_CONTAINERS event 2024-12-03T23:28:18,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741960_1136 (size=14541) 2024-12-03T23:28:18,707 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741960_1136 (size=14541) 2024-12-03T23:28:18,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741960_1136 (size=14541) 2024-12-03T23:28:20,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741962_1138 (size=6322) 2024-12-03T23:28:20,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741962_1138 (size=6322) 2024-12-03T23:28:20,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741962_1138 (size=6322) 2024-12-03T23:28:21,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741963_1139 (size=8031) 2024-12-03T23:28:21,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741963_1139 (size=8031) 2024-12-03T23:28:21,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741963_1139 (size=8031) 2024-12-03T23:28:21,276 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_1/usercache/jenkins/appcache/application_1733268377251_0002/container_1733268377251_0002_01_000004/launch_container.sh] 2024-12-03T23:28:21,277 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_1/usercache/jenkins/appcache/application_1733268377251_0002/container_1733268377251_0002_01_000004/container_tokens] 2024-12-03T23:28:21,277 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_1/usercache/jenkins/appcache/application_1733268377251_0002/container_1733268377251_0002_01_000004/sysfs] 2024-12-03T23:28:21,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741964_1140 (size=5242) 2024-12-03T23:28:21,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741964_1140 (size=5242) 2024-12-03T23:28:21,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741964_1140 (size=5242) 2024-12-03T23:28:21,435 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_1/usercache/jenkins/appcache/application_1733268377251_0002/container_1733268377251_0002_01_000003/launch_container.sh] 2024-12-03T23:28:21,435 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_1/usercache/jenkins/appcache/application_1733268377251_0002/container_1733268377251_0002_01_000003/container_tokens] 2024-12-03T23:28:21,436 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_1/usercache/jenkins/appcache/application_1733268377251_0002/container_1733268377251_0002_01_000003/sysfs] 2024-12-03T23:28:21,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741961_1137 (size=31743) 2024-12-03T23:28:21,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741961_1137 (size=31743) 2024-12-03T23:28:21,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741961_1137 (size=31743) 2024-12-03T23:28:21,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741965_1141 (size=465) 2024-12-03T23:28:21,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741965_1141 (size=465) 2024-12-03T23:28:21,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741965_1141 (size=465) 2024-12-03T23:28:21,514 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_0/usercache/jenkins/appcache/application_1733268377251_0002/container_1733268377251_0002_01_000005/launch_container.sh] 2024-12-03T23:28:21,515 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_0/usercache/jenkins/appcache/application_1733268377251_0002/container_1733268377251_0002_01_000005/container_tokens] 2024-12-03T23:28:21,515 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_0/usercache/jenkins/appcache/application_1733268377251_0002/container_1733268377251_0002_01_000005/sysfs] 2024-12-03T23:28:21,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:45743 is added to blk_1073741966_1142 (size=31743) 2024-12-03T23:28:21,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741966_1142 (size=31743) 2024-12-03T23:28:21,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741966_1142 (size=31743) 2024-12-03T23:28:21,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741967_1143 (size=349701) 2024-12-03T23:28:21,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741967_1143 (size=349701) 2024-12-03T23:28:21,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741967_1143 (size=349701) 2024-12-03T23:28:21,581 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0002_000001 (auth:SIMPLE) from 127.0.0.1:40256 2024-12-03T23:28:21,588 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0002_000001 (auth:SIMPLE) from 127.0.0.1:40272 2024-12-03T23:28:21,593 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0002_000001 (auth:SIMPLE) from 127.0.0.1:43898 2024-12-03T23:28:22,886 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-03T23:28:22,888 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-12-03T23:28:22,935 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: testExportWithTargetName 2024-12-03T23:28:22,936 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-03T23:28:22,936 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-03T23:28:22,936 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-919726820_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/snaptb0-testExportWithTargetName at hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/snaptb0-testExportWithTargetName 2024-12-03T23:28:22,937 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/snaptb0-testExportWithTargetName/.snapshotinfo 2024-12-03T23:28:22,937 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/snaptb0-testExportWithTargetName/data.manifest 2024-12-03T23:28:22,937 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-919726820_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268483580/.hbase-snapshot/testExportWithTargetName at 
hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268483580/.hbase-snapshot/testExportWithTargetName 2024-12-03T23:28:22,938 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268483580/.hbase-snapshot/testExportWithTargetName/.snapshotinfo 2024-12-03T23:28:22,938 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268483580/.hbase-snapshot/testExportWithTargetName/data.manifest 2024-12-03T23:28:22,951 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportWithTargetName 2024-12-03T23:28:22,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure2.ProcedureExecutor(1139): Stored pid=56, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithTargetName 2024-12-03T23:28:22,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=56 2024-12-03T23:28:22,963 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733268502963"}]},"ts":"1733268502963"} 2024-12-03T23:28:22,967 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=DISABLING in hbase:meta 2024-12-03T23:28:22,967 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithTargetName to state=DISABLING 2024-12-03T23:28:22,969 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=57, ppid=56, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithTargetName}] 2024-12-03T23:28:22,971 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=69a8c10beb49ca5b02b44d5c1e779b0b, UNASSIGN}, {pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=250294dc7a876a930683b1f0a3a4ae1b, UNASSIGN}] 2024-12-03T23:28:22,973 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=250294dc7a876a930683b1f0a3a4ae1b, UNASSIGN 2024-12-03T23:28:22,973 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=69a8c10beb49ca5b02b44d5c1e779b0b, UNASSIGN 2024-12-03T23:28:22,975 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=58 updating hbase:meta row=69a8c10beb49ca5b02b44d5c1e779b0b, regionState=CLOSING, regionLocation=ed8014902436,41121,1733268370525 2024-12-03T23:28:22,975 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=59 updating hbase:meta row=250294dc7a876a930683b1f0a3a4ae1b, regionState=CLOSING, 
regionLocation=ed8014902436,39085,1733268370401 2024-12-03T23:28:22,979 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=250294dc7a876a930683b1f0a3a4ae1b, UNASSIGN because future has completed 2024-12-03T23:28:22,980 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-03T23:28:22,980 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=60, ppid=59, state=RUNNABLE, hasLock=false; CloseRegionProcedure 250294dc7a876a930683b1f0a3a4ae1b, server=ed8014902436,39085,1733268370401}] 2024-12-03T23:28:22,981 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=69a8c10beb49ca5b02b44d5c1e779b0b, UNASSIGN because future has completed 2024-12-03T23:28:22,982 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-03T23:28:22,982 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=61, ppid=58, state=RUNNABLE, hasLock=false; CloseRegionProcedure 69a8c10beb49ca5b02b44d5c1e779b0b, server=ed8014902436,41121,1733268370525}] 2024-12-03T23:28:23,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=56 2024-12-03T23:28:23,121 ERROR [ProcedureDispatcherTimeoutThread {}] procedure2.RemoteProcedureDispatcher$TimeoutExecutorThread(331): DelayQueue for RemoteProcedureDispatcher is not empty when timed waiting elapsed. If this is repeated consistently, it means no element is getting expired from the queue and it might freeze the system. Queue: [containedObject=ed8014902436,39085,1733268370401, timeout=1733268503133, delay=11, operations=[pid=60, ppid=59, state=RUNNABLE, hasLock=false; CloseRegionProcedure 250294dc7a876a930683b1f0a3a4ae1b, server=ed8014902436,39085,1733268370401], containedObject=ed8014902436,41121,1733268370525, timeout=1733268503135, delay=13, operations=[pid=61, ppid=58, state=RUNNABLE, hasLock=false; CloseRegionProcedure 69a8c10beb49ca5b02b44d5c1e779b0b, server=ed8014902436,41121,1733268370525]] 2024-12-03T23:28:23,134 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(122): Close 250294dc7a876a930683b1f0a3a4ae1b 2024-12-03T23:28:23,134 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-03T23:28:23,134 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1722): Closing 250294dc7a876a930683b1f0a3a4ae1b, disabling compactions & flushes 2024-12-03T23:28:23,134 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,1,1733268481900.250294dc7a876a930683b1f0a3a4ae1b. 
2024-12-03T23:28:23,134 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,1,1733268481900.250294dc7a876a930683b1f0a3a4ae1b. 2024-12-03T23:28:23,135 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,1,1733268481900.250294dc7a876a930683b1f0a3a4ae1b. after waiting 0 ms 2024-12-03T23:28:23,135 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,1,1733268481900.250294dc7a876a930683b1f0a3a4ae1b. 2024-12-03T23:28:23,136 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(122): Close 69a8c10beb49ca5b02b44d5c1e779b0b 2024-12-03T23:28:23,136 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-03T23:28:23,136 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1722): Closing 69a8c10beb49ca5b02b44d5c1e779b0b, disabling compactions & flushes 2024-12-03T23:28:23,136 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,,1733268481900.69a8c10beb49ca5b02b44d5c1e779b0b. 2024-12-03T23:28:23,136 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,,1733268481900.69a8c10beb49ca5b02b44d5c1e779b0b. 2024-12-03T23:28:23,136 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,,1733268481900.69a8c10beb49ca5b02b44d5c1e779b0b. after waiting 0 ms 2024-12-03T23:28:23,137 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,,1733268481900.69a8c10beb49ca5b02b44d5c1e779b0b. 2024-12-03T23:28:23,141 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithTargetName/250294dc7a876a930683b1f0a3a4ae1b/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-03T23:28:23,143 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-03T23:28:23,143 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,1,1733268481900.250294dc7a876a930683b1f0a3a4ae1b. 
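The DisableTableProcedure (pid=56) with its region closes above, and the DeleteTableProcedure (pid=62) stored a few lines further down, are the server-side halves of the usual client teardown. A minimal sketch of those client calls, assuming a standard Admin handle and omitting error handling:

```java
// Sketch only: client-side calls that drive the DisableTableProcedure and
// DeleteTableProcedure seen in this part of the log.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableSketch {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportWithTargetName");
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.disableTable(table);  // regions are unassigned and closed (pids 56-61 above)
      admin.deleteTable(table);   // table data and permissions are removed (pid=62 below)
    }
  }
}
```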
2024-12-03T23:28:23,143 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1676): Region close journal for 250294dc7a876a930683b1f0a3a4ae1b: Waiting for close lock at 1733268503134Running coprocessor pre-close hooks at 1733268503134Disabling compacts and flushes for region at 1733268503134Disabling writes for close at 1733268503135 (+1 ms)Writing region close event to WAL at 1733268503137 (+2 ms)Running coprocessor post-close hooks at 1733268503143 (+6 ms)Closed at 1733268503143 2024-12-03T23:28:23,144 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithTargetName/69a8c10beb49ca5b02b44d5c1e779b0b/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-03T23:28:23,144 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-03T23:28:23,144 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,,1733268481900.69a8c10beb49ca5b02b44d5c1e779b0b. 2024-12-03T23:28:23,144 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1676): Region close journal for 69a8c10beb49ca5b02b44d5c1e779b0b: Waiting for close lock at 1733268503136Running coprocessor pre-close hooks at 1733268503136Disabling compacts and flushes for region at 1733268503136Disabling writes for close at 1733268503137 (+1 ms)Writing region close event to WAL at 1733268503137Running coprocessor post-close hooks at 1733268503144 (+7 ms)Closed at 1733268503144 2024-12-03T23:28:23,145 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(157): Closed 250294dc7a876a930683b1f0a3a4ae1b 2024-12-03T23:28:23,146 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=59 updating hbase:meta row=250294dc7a876a930683b1f0a3a4ae1b, regionState=CLOSED 2024-12-03T23:28:23,146 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(157): Closed 69a8c10beb49ca5b02b44d5c1e779b0b 2024-12-03T23:28:23,148 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=58 updating hbase:meta row=69a8c10beb49ca5b02b44d5c1e779b0b, regionState=CLOSED 2024-12-03T23:28:23,150 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=60, ppid=59, state=RUNNABLE, hasLock=false; CloseRegionProcedure 250294dc7a876a930683b1f0a3a4ae1b, server=ed8014902436,39085,1733268370401 because future has completed 2024-12-03T23:28:23,154 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=61, ppid=58, state=RUNNABLE, hasLock=false; CloseRegionProcedure 69a8c10beb49ca5b02b44d5c1e779b0b, server=ed8014902436,41121,1733268370525 because future has completed 2024-12-03T23:28:23,154 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=60, resume processing ppid=59 2024-12-03T23:28:23,154 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=60, ppid=59, state=SUCCESS, hasLock=false; CloseRegionProcedure 
250294dc7a876a930683b1f0a3a4ae1b, server=ed8014902436,39085,1733268370401 in 172 msec 2024-12-03T23:28:23,156 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=59, ppid=57, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=250294dc7a876a930683b1f0a3a4ae1b, UNASSIGN in 184 msec 2024-12-03T23:28:23,157 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=61, resume processing ppid=58 2024-12-03T23:28:23,157 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=61, ppid=58, state=SUCCESS, hasLock=false; CloseRegionProcedure 69a8c10beb49ca5b02b44d5c1e779b0b, server=ed8014902436,41121,1733268370525 in 173 msec 2024-12-03T23:28:23,162 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=58, resume processing ppid=57 2024-12-03T23:28:23,162 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=58, ppid=57, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=69a8c10beb49ca5b02b44d5c1e779b0b, UNASSIGN in 186 msec 2024-12-03T23:28:23,172 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=57, resume processing ppid=56 2024-12-03T23:28:23,172 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=57, ppid=56, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithTargetName in 197 msec 2024-12-03T23:28:23,177 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733268503176"}]},"ts":"1733268503176"} 2024-12-03T23:28:23,180 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=DISABLED in hbase:meta 2024-12-03T23:28:23,180 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithTargetName to state=DISABLED 2024-12-03T23:28:23,189 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=56, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithTargetName in 230 msec 2024-12-03T23:28:23,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=56 2024-12-03T23:28:23,270 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithTargetName completed 2024-12-03T23:28:23,270 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportWithTargetName 2024-12-03T23:28:23,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure2.ProcedureExecutor(1139): Stored pid=62, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-03T23:28:23,272 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=62, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-03T23:28:23,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithTargetName 2024-12-03T23:28:23,273 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): 
Deleting regions from filesystem for pid=62, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-03T23:28:23,276 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37351 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithTargetName 2024-12-03T23:28:23,280 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithTargetName/69a8c10beb49ca5b02b44d5c1e779b0b 2024-12-03T23:28:23,280 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithTargetName/250294dc7a876a930683b1f0a3a4ae1b 2024-12-03T23:28:23,282 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithTargetName/250294dc7a876a930683b1f0a3a4ae1b/cf, FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithTargetName/250294dc7a876a930683b1f0a3a4ae1b/recovered.edits] 2024-12-03T23:28:23,282 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithTargetName/69a8c10beb49ca5b02b44d5c1e779b0b/cf, FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithTargetName/69a8c10beb49ca5b02b44d5c1e779b0b/recovered.edits] 2024-12-03T23:28:23,294 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithTargetName/69a8c10beb49ca5b02b44d5c1e779b0b/cf/1dfd80d56f694d5da8e504ef56c93a6e to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testtb-testExportWithTargetName/69a8c10beb49ca5b02b44d5c1e779b0b/cf/1dfd80d56f694d5da8e504ef56c93a6e 2024-12-03T23:28:23,294 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithTargetName/250294dc7a876a930683b1f0a3a4ae1b/cf/a0e4af9ee5884d9ca30a0133ade43bab to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testtb-testExportWithTargetName/250294dc7a876a930683b1f0a3a4ae1b/cf/a0e4af9ee5884d9ca30a0133ade43bab 2024-12-03T23:28:23,297 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithTargetName/250294dc7a876a930683b1f0a3a4ae1b/recovered.edits/9.seqid to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testtb-testExportWithTargetName/250294dc7a876a930683b1f0a3a4ae1b/recovered.edits/9.seqid 2024-12-03T23:28:23,297 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithTargetName/69a8c10beb49ca5b02b44d5c1e779b0b/recovered.edits/9.seqid to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testtb-testExportWithTargetName/69a8c10beb49ca5b02b44d5c1e779b0b/recovered.edits/9.seqid 2024-12-03T23:28:23,297 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithTargetName/250294dc7a876a930683b1f0a3a4ae1b 2024-12-03T23:28:23,297 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithTargetName/69a8c10beb49ca5b02b44d5c1e779b0b 2024-12-03T23:28:23,298 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithTargetName regions 2024-12-03T23:28:23,298 DEBUG [PEWorker-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71 2024-12-03T23:28:23,299 DEBUG [PEWorker-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf] 2024-12-03T23:28:23,304 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/c4ca4238a0b923820dcc509a6f75849b202412033c624b07ba6645cebb6e81c73b42bc02_250294dc7a876a930683b1f0a3a4ae1b to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/c4ca4238a0b923820dcc509a6f75849b202412033c624b07ba6645cebb6e81c73b42bc02_250294dc7a876a930683b1f0a3a4ae1b 2024-12-03T23:28:23,306 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/d41d8cd98f00b204e9800998ecf8427e20241203b80bab0b3b8d4e18b82b940422611cfc_69a8c10beb49ca5b02b44d5c1e779b0b to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/d41d8cd98f00b204e9800998ecf8427e20241203b80bab0b3b8d4e18b82b940422611cfc_69a8c10beb49ca5b02b44d5c1e779b0b 2024-12-03T23:28:23,306 DEBUG [PEWorker-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71 2024-12-03T23:28:23,309 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=62, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-03T23:28:23,313 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithTargetName from hbase:meta 2024-12-03T23:28:23,330 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-03T23:28:23,330 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-03T23:28:23,330 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-03T23:28:23,330 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-03T23:28:23,332 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-12-03T23:28:23,332 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-12-03T23:28:23,333 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-12-03T23:28:23,333 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-12-03T23:28:23,335 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithTargetName' descriptor. 2024-12-03T23:28:23,337 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=62, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-03T23:28:23,337 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithTargetName' from region states. 
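A minimal sketch, not from the test source, of the client-side cleanup whose server-side trace surrounds this point: deleting the already-disabled table (the DeleteTableProcedure above archives the region and MOB directories under .../archive/data/... before removing the hbase:meta rows) and then the snapshots taken from it. The helper name and `conf` are assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

class CleanupSketch {
  // Drops a disabled table and any snapshots created from it.
  static void cleanup(Configuration conf, String table, String... snapshots) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.deleteTable(TableName.valueOf(table));
      for (String name : snapshots) {
        admin.deleteSnapshot(name);
      }
    }
  }
}

For the run logged here, the equivalent call would be cleanup(conf, "testtb-testExportWithTargetName", "emptySnaptb0-testExportWithTargetName", "snaptb0-testExportWithTargetName").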
2024-12-03T23:28:23,338 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName,,1733268481900.69a8c10beb49ca5b02b44d5c1e779b0b.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733268503337"}]},"ts":"9223372036854775807"} 2024-12-03T23:28:23,338 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName,1,1733268481900.250294dc7a876a930683b1f0a3a4ae1b.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733268503337"}]},"ts":"9223372036854775807"} 2024-12-03T23:28:23,338 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-03T23:28:23,338 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:28:23,338 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-03T23:28:23,338 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:28:23,338 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-03T23:28:23,338 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:28:23,338 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-03T23:28:23,338 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:28:23,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=62 2024-12-03T23:28:23,342 INFO [PEWorker-4 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-03T23:28:23,342 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 69a8c10beb49ca5b02b44d5c1e779b0b, NAME => 'testtb-testExportWithTargetName,,1733268481900.69a8c10beb49ca5b02b44d5c1e779b0b.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 250294dc7a876a930683b1f0a3a4ae1b, NAME => 'testtb-testExportWithTargetName,1,1733268481900.250294dc7a876a930683b1f0a3a4ae1b.', STARTKEY => '1', ENDKEY => ''}] 2024-12-03T23:28:23,342 DEBUG [PEWorker-4 {}] 
procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithTargetName' as deleted. 2024-12-03T23:28:23,342 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733268503342"}]},"ts":"9223372036854775807"} 2024-12-03T23:28:23,345 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithTargetName state from META 2024-12-03T23:28:23,346 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=62, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-03T23:28:23,348 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=62, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithTargetName in 77 msec 2024-12-03T23:28:23,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=62 2024-12-03T23:28:23,450 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithTargetName 2024-12-03T23:28:23,450 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithTargetName completed 2024-12-03T23:28:23,459 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithTargetName" type: DISABLED 2024-12-03T23:28:23,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithTargetName 2024-12-03T23:28:23,463 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithTargetName" type: DISABLED 2024-12-03T23:28:23,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithTargetName 2024-12-03T23:28:23,496 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportWithTargetName Thread=797 (was 759) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1165850966_1 at /127.0.0.1:50776 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (790252428) connection to localhost/127.0.0.1:34235 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (790252428) connection to localhost/127.0.0.1:39351 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1165850966_1 at /127.0.0.1:60328 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client 
(790252428) connection to localhost/127.0.0.1:39351 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34235 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 130590) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34591 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-919726820_22 at /127.0.0.1:43572 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39351 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39351 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44365 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool.commonPool-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) 
java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: HFileArchiver-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-919726820_22 at /127.0.0.1:43376 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/ed8014902436:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native 
Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/ed8014902436:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/ed8014902436:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-2198 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=819 (was 805) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=529 (was 444) - SystemLoadAverage LEAK? -, ProcessCount=22 (was 19) - ProcessCount LEAK? -, AvailableMemoryMB=1946 (was 605) - AvailableMemoryMB LEAK? 
- 2024-12-03T23:28:23,496 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=797 is superior to 500 2024-12-03T23:28:23,522 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportWithResetTtl Thread=797, OpenFileDescriptor=819, MaxFileDescriptor=1048576, SystemLoadAverage=529, ProcessCount=22, AvailableMemoryMB=1945 2024-12-03T23:28:23,522 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=797 is superior to 500 2024-12-03T23:28:23,525 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-03T23:28:23,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure2.ProcedureExecutor(1139): Stored pid=63, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithResetTtl 2024-12-03T23:28:23,528 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_PRE_OPERATION 2024-12-03T23:28:23,530 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithResetTtl" procId is: 63 2024-12-03T23:28:23,531 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-03T23:28:23,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-12-03T23:28:23,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741968_1144 (size=440) 2024-12-03T23:28:23,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741968_1144 (size=440) 2024-12-03T23:28:23,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741968_1144 (size=440) 2024-12-03T23:28:23,551 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 800e371e963fd783c084d4294a35b750, NAME => 'testtb-testExportWithResetTtl,,1733268503524.800e371e963fd783c084d4294a35b750.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 
'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b 2024-12-03T23:28:23,552 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => ed0c58842ad93c456247a9c71ef7bba5, NAME => 'testtb-testExportWithResetTtl,1,1733268503524.ed0c58842ad93c456247a9c71ef7bba5.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b 2024-12-03T23:28:23,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741970_1146 (size=65) 2024-12-03T23:28:23,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741970_1146 (size=65) 2024-12-03T23:28:23,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741970_1146 (size=65) 2024-12-03T23:28:23,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741969_1145 (size=65) 2024-12-03T23:28:23,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741969_1145 (size=65) 2024-12-03T23:28:23,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741969_1145 (size=65) 2024-12-03T23:28:23,582 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,,1733268503524.800e371e963fd783c084d4294a35b750.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T23:28:23,583 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1722): Closing 800e371e963fd783c084d4294a35b750, disabling compactions & flushes 2024-12-03T23:28:23,583 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,,1733268503524.800e371e963fd783c084d4294a35b750. 2024-12-03T23:28:23,583 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,,1733268503524.800e371e963fd783c084d4294a35b750. 2024-12-03T23:28:23,583 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,,1733268503524.800e371e963fd783c084d4294a35b750. 
after waiting 0 ms 2024-12-03T23:28:23,583 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,,1733268503524.800e371e963fd783c084d4294a35b750. 2024-12-03T23:28:23,583 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,,1733268503524.800e371e963fd783c084d4294a35b750. 2024-12-03T23:28:23,583 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1676): Region close journal for 800e371e963fd783c084d4294a35b750: Waiting for close lock at 1733268503583Disabling compacts and flushes for region at 1733268503583Disabling writes for close at 1733268503583Writing region close event to WAL at 1733268503583Closed at 1733268503583 2024-12-03T23:28:23,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-12-03T23:28:23,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-12-03T23:28:23,978 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,1,1733268503524.ed0c58842ad93c456247a9c71ef7bba5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T23:28:23,978 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1722): Closing ed0c58842ad93c456247a9c71ef7bba5, disabling compactions & flushes 2024-12-03T23:28:23,978 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,1,1733268503524.ed0c58842ad93c456247a9c71ef7bba5. 2024-12-03T23:28:23,978 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,1,1733268503524.ed0c58842ad93c456247a9c71ef7bba5. 2024-12-03T23:28:23,979 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,1,1733268503524.ed0c58842ad93c456247a9c71ef7bba5. after waiting 0 ms 2024-12-03T23:28:23,979 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,1,1733268503524.ed0c58842ad93c456247a9c71ef7bba5. 2024-12-03T23:28:23,979 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,1,1733268503524.ed0c58842ad93c456247a9c71ef7bba5. 
2024-12-03T23:28:23,979 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1676): Region close journal for ed0c58842ad93c456247a9c71ef7bba5: Waiting for close lock at 1733268503978Disabling compacts and flushes for region at 1733268503978Disabling writes for close at 1733268503979 (+1 ms)Writing region close event to WAL at 1733268503979Closed at 1733268503979 2024-12-03T23:28:23,980 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_ADD_TO_META 2024-12-03T23:28:23,981 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithResetTtl,,1733268503524.800e371e963fd783c084d4294a35b750.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733268503980"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733268503980"}]},"ts":"1733268503980"} 2024-12-03T23:28:23,981 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithResetTtl,1,1733268503524.ed0c58842ad93c456247a9c71ef7bba5.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733268503980"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733268503980"}]},"ts":"1733268503980"} 2024-12-03T23:28:23,984 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-03T23:28:23,985 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-03T23:28:23,985 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733268503985"}]},"ts":"1733268503985"} 2024-12-03T23:28:23,988 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=ENABLING in hbase:meta 2024-12-03T23:28:23,989 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {ed8014902436=0} racks are {/default-rack=0} 2024-12-03T23:28:23,990 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-03T23:28:23,990 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-03T23:28:23,990 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-03T23:28:23,990 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-03T23:28:23,990 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-03T23:28:23,990 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-03T23:28:23,990 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-03T23:28:23,990 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-03T23:28:23,990 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-03T23:28:23,990 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-03T23:28:23,991 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=800e371e963fd783c084d4294a35b750, ASSIGN}, {pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=ed0c58842ad93c456247a9c71ef7bba5, ASSIGN}] 2024-12-03T23:28:23,992 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=800e371e963fd783c084d4294a35b750, ASSIGN 2024-12-03T23:28:23,992 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=ed0c58842ad93c456247a9c71ef7bba5, ASSIGN 2024-12-03T23:28:23,994 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=ed0c58842ad93c456247a9c71ef7bba5, ASSIGN; state=OFFLINE, location=ed8014902436,37351,1733268370600; forceNewPlan=false, retain=false 2024-12-03T23:28:23,994 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=800e371e963fd783c084d4294a35b750, ASSIGN; state=OFFLINE, location=ed8014902436,39085,1733268370401; forceNewPlan=false, retain=false 2024-12-03T23:28:24,092 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_3/usercache/jenkins/appcache/application_1733268377251_0002/container_1733268377251_0002_01_000002/launch_container.sh] 2024-12-03T23:28:24,092 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_3/usercache/jenkins/appcache/application_1733268377251_0002/container_1733268377251_0002_01_000002/container_tokens] 2024-12-03T23:28:24,092 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_3/usercache/jenkins/appcache/application_1733268377251_0002/container_1733268377251_0002_01_000002/sysfs] 2024-12-03T23:28:24,144 INFO [ed8014902436:43753 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-03T23:28:24,145 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=65 updating hbase:meta row=ed0c58842ad93c456247a9c71ef7bba5, regionState=OPENING, regionLocation=ed8014902436,37351,1733268370600 2024-12-03T23:28:24,145 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=64 updating hbase:meta row=800e371e963fd783c084d4294a35b750, regionState=OPENING, regionLocation=ed8014902436,39085,1733268370401 2024-12-03T23:28:24,147 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=ed0c58842ad93c456247a9c71ef7bba5, ASSIGN because future has completed 2024-12-03T23:28:24,147 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=66, ppid=65, state=RUNNABLE, hasLock=false; OpenRegionProcedure ed0c58842ad93c456247a9c71ef7bba5, server=ed8014902436,37351,1733268370600}] 2024-12-03T23:28:24,148 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=800e371e963fd783c084d4294a35b750, ASSIGN because future has completed 2024-12-03T23:28:24,150 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=67, ppid=64, state=RUNNABLE, hasLock=false; OpenRegionProcedure 800e371e963fd783c084d4294a35b750, server=ed8014902436,39085,1733268370401}] 2024-12-03T23:28:24,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-12-03T23:28:24,304 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] handler.AssignRegionHandler(132): Open testtb-testExportWithResetTtl,,1733268503524.800e371e963fd783c084d4294a35b750. 2024-12-03T23:28:24,305 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7752): Opening region: {ENCODED => 800e371e963fd783c084d4294a35b750, NAME => 'testtb-testExportWithResetTtl,,1733268503524.800e371e963fd783c084d4294a35b750.', STARTKEY => '', ENDKEY => '1'} 2024-12-03T23:28:24,305 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithResetTtl,,1733268503524.800e371e963fd783c084d4294a35b750. service=AccessControlService 2024-12-03T23:28:24,305 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-03T23:28:24,306 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithResetTtl 800e371e963fd783c084d4294a35b750 2024-12-03T23:28:24,306 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,,1733268503524.800e371e963fd783c084d4294a35b750.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T23:28:24,306 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7794): checking encryption for 800e371e963fd783c084d4294a35b750 2024-12-03T23:28:24,306 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7797): checking classloading for 800e371e963fd783c084d4294a35b750 2024-12-03T23:28:24,306 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(132): Open testtb-testExportWithResetTtl,1,1733268503524.ed0c58842ad93c456247a9c71ef7bba5. 2024-12-03T23:28:24,306 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7752): Opening region: {ENCODED => ed0c58842ad93c456247a9c71ef7bba5, NAME => 'testtb-testExportWithResetTtl,1,1733268503524.ed0c58842ad93c456247a9c71ef7bba5.', STARTKEY => '1', ENDKEY => ''} 2024-12-03T23:28:24,307 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithResetTtl,1,1733268503524.ed0c58842ad93c456247a9c71ef7bba5. service=AccessControlService 2024-12-03T23:28:24,307 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-03T23:28:24,307 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithResetTtl ed0c58842ad93c456247a9c71ef7bba5 2024-12-03T23:28:24,307 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,1,1733268503524.ed0c58842ad93c456247a9c71ef7bba5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T23:28:24,307 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7794): checking encryption for ed0c58842ad93c456247a9c71ef7bba5 2024-12-03T23:28:24,307 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7797): checking classloading for ed0c58842ad93c456247a9c71ef7bba5 2024-12-03T23:28:24,308 INFO [StoreOpener-800e371e963fd783c084d4294a35b750-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 800e371e963fd783c084d4294a35b750 2024-12-03T23:28:24,309 INFO [StoreOpener-ed0c58842ad93c456247a9c71ef7bba5-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region ed0c58842ad93c456247a9c71ef7bba5 2024-12-03T23:28:24,309 INFO [StoreOpener-800e371e963fd783c084d4294a35b750-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 800e371e963fd783c084d4294a35b750 columnFamilyName cf 2024-12-03T23:28:24,310 INFO [StoreOpener-ed0c58842ad93c456247a9c71ef7bba5-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ed0c58842ad93c456247a9c71ef7bba5 columnFamilyName cf 2024-12-03T23:28:24,311 DEBUG [StoreOpener-800e371e963fd783c084d4294a35b750-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:28:24,311 DEBUG [StoreOpener-ed0c58842ad93c456247a9c71ef7bba5-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:28:24,311 INFO [StoreOpener-ed0c58842ad93c456247a9c71ef7bba5-1 {}] regionserver.HStore(327): Store=ed0c58842ad93c456247a9c71ef7bba5/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T23:28:24,311 INFO [StoreOpener-800e371e963fd783c084d4294a35b750-1 {}] regionserver.HStore(327): Store=800e371e963fd783c084d4294a35b750/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T23:28:24,311 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1038): replaying wal for ed0c58842ad93c456247a9c71ef7bba5 2024-12-03T23:28:24,312 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1038): replaying wal for 800e371e963fd783c084d4294a35b750 2024-12-03T23:28:24,312 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithResetTtl/ed0c58842ad93c456247a9c71ef7bba5 2024-12-03T23:28:24,312 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithResetTtl/ed0c58842ad93c456247a9c71ef7bba5 2024-12-03T23:28:24,312 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithResetTtl/800e371e963fd783c084d4294a35b750 2024-12-03T23:28:24,313 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1048): stopping wal replay for ed0c58842ad93c456247a9c71ef7bba5 2024-12-03T23:28:24,313 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithResetTtl/800e371e963fd783c084d4294a35b750 2024-12-03T23:28:24,313 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1060): Cleaning up temporary data for ed0c58842ad93c456247a9c71ef7bba5 2024-12-03T23:28:24,313 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1048): stopping wal replay for 800e371e963fd783c084d4294a35b750 2024-12-03T23:28:24,313 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1060): Cleaning up temporary data for 800e371e963fd783c084d4294a35b750 2024-12-03T23:28:24,314 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 
{event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1093): writing seq id for ed0c58842ad93c456247a9c71ef7bba5 2024-12-03T23:28:24,315 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1093): writing seq id for 800e371e963fd783c084d4294a35b750 2024-12-03T23:28:24,316 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithResetTtl/ed0c58842ad93c456247a9c71ef7bba5/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T23:28:24,317 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1114): Opened ed0c58842ad93c456247a9c71ef7bba5; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69799131, jitterRate=0.04008810222148895}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-03T23:28:24,317 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ed0c58842ad93c456247a9c71ef7bba5 2024-12-03T23:28:24,317 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithResetTtl/800e371e963fd783c084d4294a35b750/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T23:28:24,318 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1006): Region open journal for ed0c58842ad93c456247a9c71ef7bba5: Running coprocessor pre-open hook at 1733268504307Writing region info on filesystem at 1733268504307Initializing all the Stores at 1733268504308 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733268504308Cleaning up temporary data from old regions at 1733268504313 (+5 ms)Running coprocessor post-open hooks at 1733268504317 (+4 ms)Region opened successfully at 1733268504318 (+1 ms) 2024-12-03T23:28:24,318 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1114): Opened 800e371e963fd783c084d4294a35b750; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60206084, jitterRate=-0.10285943746566772}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-03T23:28:24,318 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 800e371e963fd783c084d4294a35b750 2024-12-03T23:28:24,318 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1006): Region open journal for 800e371e963fd783c084d4294a35b750: Running coprocessor pre-open hook at 1733268504306Writing region info on filesystem at 
1733268504306Initializing all the Stores at 1733268504307 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733268504307Cleaning up temporary data from old regions at 1733268504313 (+6 ms)Running coprocessor post-open hooks at 1733268504318 (+5 ms)Region opened successfully at 1733268504318 2024-12-03T23:28:24,319 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithResetTtl,,1733268503524.800e371e963fd783c084d4294a35b750., pid=67, masterSystemTime=1733268504301 2024-12-03T23:28:24,319 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithResetTtl,1,1733268503524.ed0c58842ad93c456247a9c71ef7bba5., pid=66, masterSystemTime=1733268504301 2024-12-03T23:28:24,322 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithResetTtl,1,1733268503524.ed0c58842ad93c456247a9c71ef7bba5. 2024-12-03T23:28:24,322 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(153): Opened testtb-testExportWithResetTtl,1,1733268503524.ed0c58842ad93c456247a9c71ef7bba5. 2024-12-03T23:28:24,322 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=65 updating hbase:meta row=ed0c58842ad93c456247a9c71ef7bba5, regionState=OPEN, openSeqNum=2, regionLocation=ed8014902436,37351,1733268370600 2024-12-03T23:28:24,323 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithResetTtl,,1733268503524.800e371e963fd783c084d4294a35b750. 2024-12-03T23:28:24,324 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=64 updating hbase:meta row=800e371e963fd783c084d4294a35b750, regionState=OPEN, openSeqNum=2, regionLocation=ed8014902436,39085,1733268370401 2024-12-03T23:28:24,324 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] handler.AssignRegionHandler(153): Opened testtb-testExportWithResetTtl,,1733268503524.800e371e963fd783c084d4294a35b750. 
2024-12-03T23:28:24,325 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=66, ppid=65, state=RUNNABLE, hasLock=false; OpenRegionProcedure ed0c58842ad93c456247a9c71ef7bba5, server=ed8014902436,37351,1733268370600 because future has completed 2024-12-03T23:28:24,331 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=67, ppid=64, state=RUNNABLE, hasLock=false; OpenRegionProcedure 800e371e963fd783c084d4294a35b750, server=ed8014902436,39085,1733268370401 because future has completed 2024-12-03T23:28:24,334 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=66, resume processing ppid=65 2024-12-03T23:28:24,334 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=66, ppid=65, state=SUCCESS, hasLock=false; OpenRegionProcedure ed0c58842ad93c456247a9c71ef7bba5, server=ed8014902436,37351,1733268370600 in 184 msec 2024-12-03T23:28:24,335 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=67, resume processing ppid=64 2024-12-03T23:28:24,335 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=65, ppid=63, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=ed0c58842ad93c456247a9c71ef7bba5, ASSIGN in 344 msec 2024-12-03T23:28:24,335 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=67, ppid=64, state=SUCCESS, hasLock=false; OpenRegionProcedure 800e371e963fd783c084d4294a35b750, server=ed8014902436,39085,1733268370401 in 183 msec 2024-12-03T23:28:24,338 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=64, resume processing ppid=63 2024-12-03T23:28:24,338 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=64, ppid=63, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=800e371e963fd783c084d4294a35b750, ASSIGN in 345 msec 2024-12-03T23:28:24,339 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-03T23:28:24,339 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733268504339"}]},"ts":"1733268504339"} 2024-12-03T23:28:24,342 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=ENABLED in hbase:meta 2024-12-03T23:28:24,343 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_POST_OPERATION 2024-12-03T23:28:24,343 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithResetTtl jenkins: RWXCA 2024-12-03T23:28:24,347 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37351 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-03T23:28:24,396 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, 
path=/hbase/acl 2024-12-03T23:28:24,396 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:28:24,396 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:28:24,397 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:28:24,405 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-03T23:28:24,405 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-03T23:28:24,405 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-03T23:28:24,405 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-03T23:28:24,408 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=63, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithResetTtl in 880 msec 2024-12-03T23:28:24,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-12-03T23:28:24,671 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithResetTtl completed 2024-12-03T23:28:24,671 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-03T23:28:24,677 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithResetTtl 2024-12-03T23:28:24,677 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithResetTtl,,1733268503524.800e371e963fd783c084d4294a35b750. 
2024-12-03T23:28:24,677 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-03T23:28:24,679 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-03T23:28:24,685 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-03T23:28:24,692 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-03T23:28:24,695 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-03T23:28:24,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733268504695 (current time:1733268504695). 2024-12-03T23:28:24,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-03T23:28:24,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithResetTtl VERSION not specified, setting to 2 2024-12-03T23:28:24,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-03T23:28:24,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@18c152bc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:28:24,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.ClusterIdFetcher(90): Going to request ed8014902436,43753,-1 for getting cluster id 2024-12-03T23:28:24,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T23:28:24,698 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '365bf15c-9cbe-4838-a767-ac7274dcdbe4' 2024-12-03T23:28:24,698 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T23:28:24,699 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "365bf15c-9cbe-4838-a767-ac7274dcdbe4" 2024-12-03T23:28:24,699 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@23df6a6d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 
2024-12-03T23:28:24,699 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [ed8014902436,43753,-1] 2024-12-03T23:28:24,699 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T23:28:24,700 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:28:24,701 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43356, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T23:28:24,702 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@43d1f868, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:28:24,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T23:28:24,704 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ed8014902436,37351,1733268370600, seqNum=-1] 2024-12-03T23:28:24,704 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T23:28:24,706 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52018, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T23:28:24,708 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753. 
2024-12-03T23:28:24,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-03T23:28:24,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:28:24,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:28:24,708 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-03T23:28:24,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@40c8fc8d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:28:24,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.ClusterIdFetcher(90): Going to request ed8014902436,43753,-1 for getting cluster id 2024-12-03T23:28:24,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T23:28:24,709 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '365bf15c-9cbe-4838-a767-ac7274dcdbe4' 2024-12-03T23:28:24,709 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T23:28:24,710 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "365bf15c-9cbe-4838-a767-ac7274dcdbe4" 2024-12-03T23:28:24,710 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9da9cb9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:28:24,710 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [ed8014902436,43753,-1] 2024-12-03T23:28:24,710 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T23:28:24,710 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:28:24,711 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43362, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T23:28:24,711 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@e85c7f8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:28:24,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T23:28:24,712 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ed8014902436,37351,1733268370600, seqNum=-1] 2024-12-03T23:28:24,713 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T23:28:24,714 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52020, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T23:28:24,715 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1733268373530.94ece278d222af9bae70064026dc24ae., hostname=ed8014902436,37351,1733268370600, seqNum=2] 2024-12-03T23:28:24,717 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753. 
2024-12-03T23:28:24,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-03T23:28:24,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:28:24,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:28:24,717 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-03T23:28:24,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-03T23:28:24,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-12-03T23:28:24,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure2.ProcedureExecutor(1139): Stored pid=68, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-03T23:28:24,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 68 2024-12-03T23:28:24,719 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-03T23:28:24,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=68 2024-12-03T23:28:24,720 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-03T23:28:24,722 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-03T23:28:24,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741971_1147 (size=161) 2024-12-03T23:28:24,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741971_1147 (size=161) 2024-12-03T23:28:24,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741971_1147 (size=161) 2024-12-03T23:28:24,730 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-03T23:28:24,730 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=69, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 800e371e963fd783c084d4294a35b750}, {pid=70, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ed0c58842ad93c456247a9c71ef7bba5}] 2024-12-03T23:28:24,731 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=69, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 800e371e963fd783c084d4294a35b750 2024-12-03T23:28:24,731 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=70, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ed0c58842ad93c456247a9c71ef7bba5 2024-12-03T23:28:24,830 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=68 2024-12-03T23:28:24,884 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37351 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=70 2024-12-03T23:28:24,884 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39085 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=69 2024-12-03T23:28:24,885 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,,1733268503524.800e371e963fd783c084d4294a35b750. 2024-12-03T23:28:24,885 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,1,1733268503524.ed0c58842ad93c456247a9c71ef7bba5. 2024-12-03T23:28:24,886 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.HRegion(2603): Flush status journal for ed0c58842ad93c456247a9c71ef7bba5: 2024-12-03T23:28:24,886 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.HRegion(2603): Flush status journal for 800e371e963fd783c084d4294a35b750: 2024-12-03T23:28:24,886 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,,1733268503524.800e371e963fd783c084d4294a35b750. for emptySnaptb0-testExportWithResetTtl completed. 2024-12-03T23:28:24,886 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,1,1733268503524.ed0c58842ad93c456247a9c71ef7bba5. for emptySnaptb0-testExportWithResetTtl completed. 2024-12-03T23:28:24,887 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,1,1733268503524.ed0c58842ad93c456247a9c71ef7bba5.' region-info for snapshot=emptySnaptb0-testExportWithResetTtl 2024-12-03T23:28:24,887 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,,1733268503524.800e371e963fd783c084d4294a35b750.' 
region-info for snapshot=emptySnaptb0-testExportWithResetTtl 2024-12-03T23:28:24,887 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-03T23:28:24,887 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-03T23:28:24,887 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-03T23:28:24,887 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-03T23:28:24,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741973_1149 (size=68) 2024-12-03T23:28:24,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741973_1149 (size=68) 2024-12-03T23:28:24,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741973_1149 (size=68) 2024-12-03T23:28:24,901 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,1,1733268503524.ed0c58842ad93c456247a9c71ef7bba5. 2024-12-03T23:28:24,902 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=70 2024-12-03T23:28:24,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.HMaster(4169): Remote procedure done, pid=70 2024-12-03T23:28:24,902 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithResetTtl on region ed0c58842ad93c456247a9c71ef7bba5 2024-12-03T23:28:24,903 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=70, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ed0c58842ad93c456247a9c71ef7bba5 2024-12-03T23:28:24,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741972_1148 (size=68) 2024-12-03T23:28:24,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741972_1148 (size=68) 2024-12-03T23:28:24,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741972_1148 (size=68) 2024-12-03T23:28:24,905 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,,1733268503524.800e371e963fd783c084d4294a35b750. 
2024-12-03T23:28:24,905 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=69 2024-12-03T23:28:24,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.HMaster(4169): Remote procedure done, pid=69 2024-12-03T23:28:24,906 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithResetTtl on region 800e371e963fd783c084d4294a35b750 2024-12-03T23:28:24,906 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=69, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 800e371e963fd783c084d4294a35b750 2024-12-03T23:28:24,906 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=70, ppid=68, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ed0c58842ad93c456247a9c71ef7bba5 in 174 msec 2024-12-03T23:28:24,909 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=69, resume processing ppid=68 2024-12-03T23:28:24,909 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-03T23:28:24,909 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=69, ppid=68, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 800e371e963fd783c084d4294a35b750 in 177 msec 2024-12-03T23:28:24,910 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-03T23:28:24,911 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-03T23:28:24,911 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-03T23:28:24,911 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:28:24,911 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-03T23:28:24,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741974_1150 (size=60) 2024-12-03T23:28:24,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741974_1150 (size=60) 2024-12-03T23:28:24,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741974_1150 (size=60) 2024-12-03T23:28:24,921 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-03T23:28:24,921 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithResetTtl 2024-12-03T23:28:24,922 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithResetTtl 2024-12-03T23:28:24,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741975_1151 (size=641) 2024-12-03T23:28:24,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741975_1151 (size=641) 2024-12-03T23:28:24,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741975_1151 (size=641) 2024-12-03T23:28:24,952 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-03T23:28:24,959 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-03T23:28:24,960 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithResetTtl to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/emptySnaptb0-testExportWithResetTtl 2024-12-03T23:28:24,961 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; 
org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-03T23:28:24,962 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 68 2024-12-03T23:28:24,963 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=68, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } in 244 msec 2024-12-03T23:28:25,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=68 2024-12-03T23:28:25,041 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithResetTtl completed 2024-12-03T23:28:25,053 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39085 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithResetTtl,,1733268503524.800e371e963fd783c084d4294a35b750. with WAL disabled. Data may be lost in the event of a crash. 2024-12-03T23:28:25,055 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37351 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithResetTtl,1,1733268503524.ed0c58842ad93c456247a9c71ef7bba5. with WAL disabled. Data may be lost in the event of a crash. 2024-12-03T23:28:25,056 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-03T23:28:25,059 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithResetTtl 2024-12-03T23:28:25,059 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithResetTtl,,1733268503524.800e371e963fd783c084d4294a35b750. 
2024-12-03T23:28:25,059 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-03T23:28:25,061 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-03T23:28:25,067 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-03T23:28:25,075 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-03T23:28:25,079 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-03T23:28:25,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733268505079 (current time:1733268505079). 2024-12-03T23:28:25,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-03T23:28:25,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithResetTtl VERSION not specified, setting to 2 2024-12-03T23:28:25,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-03T23:28:25,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@46aa051f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:28:25,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.ClusterIdFetcher(90): Going to request ed8014902436,43753,-1 for getting cluster id 2024-12-03T23:28:25,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T23:28:25,081 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '365bf15c-9cbe-4838-a767-ac7274dcdbe4' 2024-12-03T23:28:25,081 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T23:28:25,081 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "365bf15c-9cbe-4838-a767-ac7274dcdbe4" 2024-12-03T23:28:25,081 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@26cd402, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 
2024-12-03T23:28:25,081 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [ed8014902436,43753,-1] 2024-12-03T23:28:25,081 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T23:28:25,082 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:28:25,082 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43386, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T23:28:25,083 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@152ddc2f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:28:25,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T23:28:25,084 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ed8014902436,37351,1733268370600, seqNum=-1] 2024-12-03T23:28:25,085 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T23:28:25,086 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52026, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T23:28:25,087 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753. 
2024-12-03T23:28:25,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-03T23:28:25,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:28:25,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:28:25,088 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-03T23:28:25,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4e4d4130, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:28:25,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.ClusterIdFetcher(90): Going to request ed8014902436,43753,-1 for getting cluster id 2024-12-03T23:28:25,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T23:28:25,090 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '365bf15c-9cbe-4838-a767-ac7274dcdbe4' 2024-12-03T23:28:25,090 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T23:28:25,091 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "365bf15c-9cbe-4838-a767-ac7274dcdbe4" 2024-12-03T23:28:25,091 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3ef0d72f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:28:25,091 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [ed8014902436,43753,-1] 2024-12-03T23:28:25,091 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T23:28:25,092 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:28:25,092 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43400, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T23:28:25,093 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@227743d5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:28:25,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T23:28:25,095 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ed8014902436,37351,1733268370600, seqNum=-1] 2024-12-03T23:28:25,096 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T23:28:25,097 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52038, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T23:28:25,099 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1733268373530.94ece278d222af9bae70064026dc24ae., hostname=ed8014902436,37351,1733268370600, seqNum=2] 2024-12-03T23:28:25,101 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753. 
2024-12-03T23:28:25,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-03T23:28:25,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:28:25,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:28:25,101 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-03T23:28:25,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-03T23:28:25,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-12-03T23:28:25,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure2.ProcedureExecutor(1139): Stored pid=71, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-03T23:28:25,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 71 2024-12-03T23:28:25,105 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-03T23:28:25,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=71 2024-12-03T23:28:25,106 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-03T23:28:25,109 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-03T23:28:25,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741976_1152 (size=156) 2024-12-03T23:28:25,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741976_1152 (size=156) 2024-12-03T23:28:25,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741976_1152 (size=156) 2024-12-03T23:28:25,122 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-03T23:28:25,122 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=72, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 800e371e963fd783c084d4294a35b750}, {pid=73, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ed0c58842ad93c456247a9c71ef7bba5}] 2024-12-03T23:28:25,123 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=72, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 800e371e963fd783c084d4294a35b750 2024-12-03T23:28:25,124 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=73, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ed0c58842ad93c456247a9c71ef7bba5 2024-12-03T23:28:25,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] 
master.MasterRpcServices(1377): Checking to see if procedure is done pid=71 2024-12-03T23:28:25,276 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39085 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=72 2024-12-03T23:28:25,276 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37351 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=73 2024-12-03T23:28:25,276 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,1,1733268503524.ed0c58842ad93c456247a9c71ef7bba5. 2024-12-03T23:28:25,276 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,,1733268503524.800e371e963fd783c084d4294a35b750. 2024-12-03T23:28:25,277 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(2902): Flushing 800e371e963fd783c084d4294a35b750 1/1 column families, dataSize=266 B heapSize=832 B 2024-12-03T23:28:25,277 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(2902): Flushing ed0c58842ad93c456247a9c71ef7bba5 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-12-03T23:28:25,299 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412038af39e906ceb4f03bbf482bde966d73a_800e371e963fd783c084d4294a35b750 is 71, key is 037a97f0e4a770f201424aaece99be32/cf:q/1733268505053/Put/seqid=0 2024-12-03T23:28:25,301 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241203ca69e17b0dc3423782ef73d08290774d_ed0c58842ad93c456247a9c71ef7bba5 is 71, key is 13a19971d8143be459c68abcb3db5601/cf:q/1733268505055/Put/seqid=0 2024-12-03T23:28:25,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741977_1153 (size=5172) 2024-12-03T23:28:25,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741977_1153 (size=5172) 2024-12-03T23:28:25,314 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:28:25,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741977_1153 (size=5172) 2024-12-03T23:28:25,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741978_1154 (size=8101) 2024-12-03T23:28:25,315 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741978_1154 (size=8101) 2024-12-03T23:28:25,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741978_1154 (size=8101) 2024-12-03T23:28:25,321 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412038af39e906ceb4f03bbf482bde966d73a_800e371e963fd783c084d4294a35b750 to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/d41d8cd98f00b204e9800998ecf8427e202412038af39e906ceb4f03bbf482bde966d73a_800e371e963fd783c084d4294a35b750 2024-12-03T23:28:25,323 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithResetTtl/800e371e963fd783c084d4294a35b750/.tmp/cf/ebfb8c49d4d34194a5862f85b5445c74, store: [table=testtb-testExportWithResetTtl family=cf region=800e371e963fd783c084d4294a35b750] 2024-12-03T23:28:25,323 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithResetTtl/800e371e963fd783c084d4294a35b750/.tmp/cf/ebfb8c49d4d34194a5862f85b5445c74 is 206, key is 09b6460454f401f074fca14576cc8166d/cf:q/1733268505053/Put/seqid=0 2024-12-03T23:28:25,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741979_1155 (size=6108) 2024-12-03T23:28:25,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741979_1155 (size=6108) 2024-12-03T23:28:25,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741979_1155 (size=6108) 2024-12-03T23:28:25,329 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=266, hasBloomFilter=true, into tmp file hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithResetTtl/800e371e963fd783c084d4294a35b750/.tmp/cf/ebfb8c49d4d34194a5862f85b5445c74 2024-12-03T23:28:25,335 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithResetTtl/800e371e963fd783c084d4294a35b750/.tmp/cf/ebfb8c49d4d34194a5862f85b5445c74 as hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithResetTtl/800e371e963fd783c084d4294a35b750/cf/ebfb8c49d4d34194a5862f85b5445c74 2024-12-03T23:28:25,341 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 
{event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithResetTtl/800e371e963fd783c084d4294a35b750/cf/ebfb8c49d4d34194a5862f85b5445c74, entries=4, sequenceid=6, filesize=6.0 K 2024-12-03T23:28:25,342 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(3140): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for 800e371e963fd783c084d4294a35b750 in 65ms, sequenceid=6, compaction requested=false 2024-12-03T23:28:25,342 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithResetTtl' 2024-12-03T23:28:25,342 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(2603): Flush status journal for 800e371e963fd783c084d4294a35b750: 2024-12-03T23:28:25,343 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,,1733268503524.800e371e963fd783c084d4294a35b750. for snaptb0-testExportWithResetTtl completed. 2024-12-03T23:28:25,343 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,,1733268503524.800e371e963fd783c084d4294a35b750.' region-info for snapshot=snaptb0-testExportWithResetTtl 2024-12-03T23:28:25,343 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-03T23:28:25,343 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithResetTtl/800e371e963fd783c084d4294a35b750/cf/ebfb8c49d4d34194a5862f85b5445c74] hfiles 2024-12-03T23:28:25,343 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithResetTtl/800e371e963fd783c084d4294a35b750/cf/ebfb8c49d4d34194a5862f85b5445c74 for snapshot=snaptb0-testExportWithResetTtl 2024-12-03T23:28:25,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741980_1156 (size=107) 2024-12-03T23:28:25,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741980_1156 (size=107) 2024-12-03T23:28:25,350 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,,1733268503524.800e371e963fd783c084d4294a35b750. 
2024-12-03T23:28:25,350 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=72 2024-12-03T23:28:25,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.HMaster(4169): Remote procedure done, pid=72 2024-12-03T23:28:25,351 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithResetTtl on region 800e371e963fd783c084d4294a35b750 2024-12-03T23:28:25,351 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=72, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 800e371e963fd783c084d4294a35b750 2024-12-03T23:28:25,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741980_1156 (size=107) 2024-12-03T23:28:25,354 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=72, ppid=71, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 800e371e963fd783c084d4294a35b750 in 230 msec 2024-12-03T23:28:25,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=71 2024-12-03T23:28:25,715 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:28:25,723 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241203ca69e17b0dc3423782ef73d08290774d_ed0c58842ad93c456247a9c71ef7bba5 to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/c4ca4238a0b923820dcc509a6f75849b20241203ca69e17b0dc3423782ef73d08290774d_ed0c58842ad93c456247a9c71ef7bba5 2024-12-03T23:28:25,725 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithResetTtl/ed0c58842ad93c456247a9c71ef7bba5/.tmp/cf/280882a0d58745f9bc871972c812f83b, store: [table=testtb-testExportWithResetTtl family=cf region=ed0c58842ad93c456247a9c71ef7bba5] 2024-12-03T23:28:25,725 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithResetTtl/ed0c58842ad93c456247a9c71ef7bba5/.tmp/cf/280882a0d58745f9bc871972c812f83b is 206, key is 141c46e700dcbfc288b1cfb9b81672993/cf:q/1733268505055/Put/seqid=0 2024-12-03T23:28:25,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=71 2024-12-03T23:28:25,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741981_1157 
(size=14651) 2024-12-03T23:28:25,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741981_1157 (size=14651) 2024-12-03T23:28:25,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741981_1157 (size=14651) 2024-12-03T23:28:25,734 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithResetTtl/ed0c58842ad93c456247a9c71ef7bba5/.tmp/cf/280882a0d58745f9bc871972c812f83b 2024-12-03T23:28:25,742 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithResetTtl/ed0c58842ad93c456247a9c71ef7bba5/.tmp/cf/280882a0d58745f9bc871972c812f83b as hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithResetTtl/ed0c58842ad93c456247a9c71ef7bba5/cf/280882a0d58745f9bc871972c812f83b 2024-12-03T23:28:25,749 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithResetTtl/ed0c58842ad93c456247a9c71ef7bba5/cf/280882a0d58745f9bc871972c812f83b, entries=46, sequenceid=6, filesize=14.3 K 2024-12-03T23:28:25,750 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(3140): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for ed0c58842ad93c456247a9c71ef7bba5 in 473ms, sequenceid=6, compaction requested=false 2024-12-03T23:28:25,750 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(2603): Flush status journal for ed0c58842ad93c456247a9c71ef7bba5: 2024-12-03T23:28:25,750 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,1,1733268503524.ed0c58842ad93c456247a9c71ef7bba5. for snaptb0-testExportWithResetTtl completed. 2024-12-03T23:28:25,750 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,1,1733268503524.ed0c58842ad93c456247a9c71ef7bba5.' 
region-info for snapshot=snaptb0-testExportWithResetTtl 2024-12-03T23:28:25,750 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-03T23:28:25,750 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithResetTtl/ed0c58842ad93c456247a9c71ef7bba5/cf/280882a0d58745f9bc871972c812f83b] hfiles 2024-12-03T23:28:25,750 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithResetTtl/ed0c58842ad93c456247a9c71ef7bba5/cf/280882a0d58745f9bc871972c812f83b for snapshot=snaptb0-testExportWithResetTtl 2024-12-03T23:28:25,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741982_1158 (size=107) 2024-12-03T23:28:25,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741982_1158 (size=107) 2024-12-03T23:28:25,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741982_1158 (size=107) 2024-12-03T23:28:25,757 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,1,1733268503524.ed0c58842ad93c456247a9c71ef7bba5. 
2024-12-03T23:28:25,757 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=73 2024-12-03T23:28:25,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.HMaster(4169): Remote procedure done, pid=73 2024-12-03T23:28:25,758 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithResetTtl on region ed0c58842ad93c456247a9c71ef7bba5 2024-12-03T23:28:25,758 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=73, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ed0c58842ad93c456247a9c71ef7bba5 2024-12-03T23:28:25,760 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=73, resume processing ppid=71 2024-12-03T23:28:25,760 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=73, ppid=71, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ed0c58842ad93c456247a9c71ef7bba5 in 636 msec 2024-12-03T23:28:25,760 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-03T23:28:25,761 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-03T23:28:25,762 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-03T23:28:25,762 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-03T23:28:25,762 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:28:25,763 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/c4ca4238a0b923820dcc509a6f75849b20241203ca69e17b0dc3423782ef73d08290774d_ed0c58842ad93c456247a9c71ef7bba5, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/d41d8cd98f00b204e9800998ecf8427e202412038af39e906ceb4f03bbf482bde966d73a_800e371e963fd783c084d4294a35b750] hfiles 2024-12-03T23:28:25,763 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/c4ca4238a0b923820dcc509a6f75849b20241203ca69e17b0dc3423782ef73d08290774d_ed0c58842ad93c456247a9c71ef7bba5 2024-12-03T23:28:25,763 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/d41d8cd98f00b204e9800998ecf8427e202412038af39e906ceb4f03bbf482bde966d73a_800e371e963fd783c084d4294a35b750 2024-12-03T23:28:25,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741983_1159 (size=291) 2024-12-03T23:28:25,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741983_1159 (size=291) 2024-12-03T23:28:25,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741983_1159 (size=291) 2024-12-03T23:28:25,772 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-03T23:28:25,772 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithResetTtl 2024-12-03T23:28:25,773 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/.tmp/snaptb0-testExportWithResetTtl 2024-12-03T23:28:25,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741984_1160 (size=951) 2024-12-03T23:28:25,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741984_1160 (size=951) 2024-12-03T23:28:25,787 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741984_1160 (size=951) 2024-12-03T23:28:25,789 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-03T23:28:25,795 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-03T23:28:25,796 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/.tmp/snaptb0-testExportWithResetTtl to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/snaptb0-testExportWithResetTtl 2024-12-03T23:28:25,797 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-03T23:28:25,798 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 71 2024-12-03T23:28:25,799 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=71, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } in 696 msec 2024-12-03T23:28:26,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=71 2024-12-03T23:28:26,241 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithResetTtl completed 2024-12-03T23:28:26,244 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-03T23:28:26,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure2.ProcedureExecutor(1139): Stored pid=74, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testExportWithResetTtl 2024-12-03T23:28:26,248 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testExportWithResetTtl 
execute state=CREATE_TABLE_PRE_OPERATION 2024-12-03T23:28:26,248 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportWithResetTtl" procId is: 74 2024-12-03T23:28:26,249 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-03T23:28:26,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=74 2024-12-03T23:28:26,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741985_1161 (size=433) 2024-12-03T23:28:26,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741985_1161 (size=433) 2024-12-03T23:28:26,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741985_1161 (size=433) 2024-12-03T23:28:26,260 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => d5c7bcc8bff6e27703baa7934d9eea50, NAME => 'testExportWithResetTtl,,1733268506243.d5c7bcc8bff6e27703baa7934d9eea50.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b 2024-12-03T23:28:26,261 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => f91ebec0757a404e510cf50f61b4011d, NAME => 'testExportWithResetTtl,1,1733268506243.f91ebec0757a404e510cf50f61b4011d.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b 2024-12-03T23:28:26,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741986_1162 (size=58) 2024-12-03T23:28:26,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741986_1162 (size=58) 2024-12-03T23:28:26,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741986_1162 (size=58) 2024-12-03T23:28:26,282 DEBUG 
[RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,1,1733268506243.f91ebec0757a404e510cf50f61b4011d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T23:28:26,282 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1722): Closing f91ebec0757a404e510cf50f61b4011d, disabling compactions & flushes 2024-12-03T23:28:26,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741987_1163 (size=58) 2024-12-03T23:28:26,282 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,1,1733268506243.f91ebec0757a404e510cf50f61b4011d. 2024-12-03T23:28:26,282 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,1,1733268506243.f91ebec0757a404e510cf50f61b4011d. 2024-12-03T23:28:26,282 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,1,1733268506243.f91ebec0757a404e510cf50f61b4011d. after waiting 0 ms 2024-12-03T23:28:26,282 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,1,1733268506243.f91ebec0757a404e510cf50f61b4011d. 2024-12-03T23:28:26,282 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1973): Closed testExportWithResetTtl,1,1733268506243.f91ebec0757a404e510cf50f61b4011d. 2024-12-03T23:28:26,282 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1676): Region close journal for f91ebec0757a404e510cf50f61b4011d: Waiting for close lock at 1733268506282Disabling compacts and flushes for region at 1733268506282Disabling writes for close at 1733268506282Writing region close event to WAL at 1733268506282Closed at 1733268506282 2024-12-03T23:28:26,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741987_1163 (size=58) 2024-12-03T23:28:26,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741987_1163 (size=58) 2024-12-03T23:28:26,283 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,,1733268506243.d5c7bcc8bff6e27703baa7934d9eea50.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T23:28:26,283 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1722): Closing d5c7bcc8bff6e27703baa7934d9eea50, disabling compactions & flushes 2024-12-03T23:28:26,283 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,,1733268506243.d5c7bcc8bff6e27703baa7934d9eea50. 2024-12-03T23:28:26,284 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,,1733268506243.d5c7bcc8bff6e27703baa7934d9eea50. 
2024-12-03T23:28:26,284 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,,1733268506243.d5c7bcc8bff6e27703baa7934d9eea50. after waiting 0 ms 2024-12-03T23:28:26,284 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,,1733268506243.d5c7bcc8bff6e27703baa7934d9eea50. 2024-12-03T23:28:26,284 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1973): Closed testExportWithResetTtl,,1733268506243.d5c7bcc8bff6e27703baa7934d9eea50. 2024-12-03T23:28:26,284 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1676): Region close journal for d5c7bcc8bff6e27703baa7934d9eea50: Waiting for close lock at 1733268506283Disabling compacts and flushes for region at 1733268506283Disabling writes for close at 1733268506284 (+1 ms)Writing region close event to WAL at 1733268506284Closed at 1733268506284 2024-12-03T23:28:26,285 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_ADD_TO_META 2024-12-03T23:28:26,285 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportWithResetTtl,1,1733268506243.f91ebec0757a404e510cf50f61b4011d.","families":{"info":[{"qualifier":"regioninfo","vlen":57,"tag":[],"timestamp":"1733268506285"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733268506285"}]},"ts":"1733268506285"} 2024-12-03T23:28:26,285 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportWithResetTtl,,1733268506243.d5c7bcc8bff6e27703baa7934d9eea50.","families":{"info":[{"qualifier":"regioninfo","vlen":57,"tag":[],"timestamp":"1733268506285"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733268506285"}]},"ts":"1733268506285"} 2024-12-03T23:28:26,287 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
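For reference, a client-side sketch of the create-table request that the CreateTableProcedure above is servicing (MOB-enabled 'cf' family with MOB_THRESHOLD '0' and VERSIONS '1', two regions split at '1') might look roughly like the following. This is an illustrative reconstruction using the standard HBase Admin API, not code taken from the test itself; the connection and configuration setup are assumed.

// Illustrative sketch only: an Admin-API call roughly equivalent to the
// 'create testExportWithResetTtl' request logged above. Not the test's code.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("testExportWithResetTtl");
      ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes("cf"))
          .setMobEnabled(true)   // IS_MOB => 'true'
          .setMobThreshold(0L)   // MOB_THRESHOLD => '0'
          .setMaxVersions(1)     // VERSIONS => '1'
          .build();
      TableDescriptor td = TableDescriptorBuilder.newBuilder(tn)
          .setColumnFamily(cf)
          .build();
      // One split key ('1') yields the two regions seen in the log:
      // one with STARTKEY '' / ENDKEY '1', one with STARTKEY '1' / ENDKEY ''.
      admin.createTable(td, new byte[][] { Bytes.toBytes("1") });
    }
  }
}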
2024-12-03T23:28:26,288 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-03T23:28:26,288 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733268506288"}]},"ts":"1733268506288"} 2024-12-03T23:28:26,290 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=ENABLING in hbase:meta 2024-12-03T23:28:26,290 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(204): Hosts are {ed8014902436=0} racks are {/default-rack=0} 2024-12-03T23:28:26,292 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-03T23:28:26,292 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-03T23:28:26,292 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-03T23:28:26,292 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-03T23:28:26,292 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-03T23:28:26,292 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-03T23:28:26,293 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-03T23:28:26,293 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-03T23:28:26,293 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-03T23:28:26,293 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-03T23:28:26,293 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=d5c7bcc8bff6e27703baa7934d9eea50, ASSIGN}, {pid=76, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=f91ebec0757a404e510cf50f61b4011d, ASSIGN}] 2024-12-03T23:28:26,295 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=d5c7bcc8bff6e27703baa7934d9eea50, ASSIGN 2024-12-03T23:28:26,295 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=76, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=f91ebec0757a404e510cf50f61b4011d, ASSIGN 2024-12-03T23:28:26,296 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=76, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=f91ebec0757a404e510cf50f61b4011d, ASSIGN; state=OFFLINE, location=ed8014902436,37351,1733268370600; forceNewPlan=false, retain=false 2024-12-03T23:28:26,296 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; 
TransitRegionStateProcedure table=testExportWithResetTtl, region=d5c7bcc8bff6e27703baa7934d9eea50, ASSIGN; state=OFFLINE, location=ed8014902436,41121,1733268370525; forceNewPlan=false, retain=false 2024-12-03T23:28:26,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=74 2024-12-03T23:28:26,446 INFO [ed8014902436:43753 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-03T23:28:26,447 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=76 updating hbase:meta row=f91ebec0757a404e510cf50f61b4011d, regionState=OPENING, regionLocation=ed8014902436,37351,1733268370600 2024-12-03T23:28:26,447 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=75 updating hbase:meta row=d5c7bcc8bff6e27703baa7934d9eea50, regionState=OPENING, regionLocation=ed8014902436,41121,1733268370525 2024-12-03T23:28:26,449 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=d5c7bcc8bff6e27703baa7934d9eea50, ASSIGN because future has completed 2024-12-03T23:28:26,449 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=77, ppid=75, state=RUNNABLE, hasLock=false; OpenRegionProcedure d5c7bcc8bff6e27703baa7934d9eea50, server=ed8014902436,41121,1733268370525}] 2024-12-03T23:28:26,450 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=76, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=f91ebec0757a404e510cf50f61b4011d, ASSIGN because future has completed 2024-12-03T23:28:26,451 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=78, ppid=76, state=RUNNABLE, hasLock=false; OpenRegionProcedure f91ebec0757a404e510cf50f61b4011d, server=ed8014902436,37351,1733268370600}] 2024-12-03T23:28:26,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=74 2024-12-03T23:28:26,606 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] handler.AssignRegionHandler(132): Open testExportWithResetTtl,,1733268506243.d5c7bcc8bff6e27703baa7934d9eea50. 2024-12-03T23:28:26,607 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(7752): Opening region: {ENCODED => d5c7bcc8bff6e27703baa7934d9eea50, NAME => 'testExportWithResetTtl,,1733268506243.d5c7bcc8bff6e27703baa7934d9eea50.', STARTKEY => '', ENDKEY => '1'} 2024-12-03T23:28:26,607 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] handler.AssignRegionHandler(132): Open testExportWithResetTtl,1,1733268506243.f91ebec0757a404e510cf50f61b4011d. 
2024-12-03T23:28:26,607 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(7752): Opening region: {ENCODED => f91ebec0757a404e510cf50f61b4011d, NAME => 'testExportWithResetTtl,1,1733268506243.f91ebec0757a404e510cf50f61b4011d.', STARTKEY => '1', ENDKEY => ''} 2024-12-03T23:28:26,607 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportWithResetTtl,,1733268506243.d5c7bcc8bff6e27703baa7934d9eea50. service=AccessControlService 2024-12-03T23:28:26,608 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportWithResetTtl,1,1733268506243.f91ebec0757a404e510cf50f61b4011d. service=AccessControlService 2024-12-03T23:28:26,608 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-03T23:28:26,608 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-03T23:28:26,608 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportWithResetTtl d5c7bcc8bff6e27703baa7934d9eea50 2024-12-03T23:28:26,608 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportWithResetTtl f91ebec0757a404e510cf50f61b4011d 2024-12-03T23:28:26,608 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,,1733268506243.d5c7bcc8bff6e27703baa7934d9eea50.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T23:28:26,608 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,1,1733268506243.f91ebec0757a404e510cf50f61b4011d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T23:28:26,608 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(7794): checking encryption for d5c7bcc8bff6e27703baa7934d9eea50 2024-12-03T23:28:26,608 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(7794): checking encryption for f91ebec0757a404e510cf50f61b4011d 2024-12-03T23:28:26,608 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(7797): checking classloading for d5c7bcc8bff6e27703baa7934d9eea50 2024-12-03T23:28:26,608 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(7797): checking classloading for f91ebec0757a404e510cf50f61b4011d 2024-12-03T23:28:26,610 INFO [StoreOpener-f91ebec0757a404e510cf50f61b4011d-1 {}] regionserver.HStore(400): Created cacheConfig: 
cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region f91ebec0757a404e510cf50f61b4011d 2024-12-03T23:28:26,611 INFO [StoreOpener-d5c7bcc8bff6e27703baa7934d9eea50-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region d5c7bcc8bff6e27703baa7934d9eea50 2024-12-03T23:28:26,613 INFO [StoreOpener-f91ebec0757a404e510cf50f61b4011d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f91ebec0757a404e510cf50f61b4011d columnFamilyName cf 2024-12-03T23:28:26,613 INFO [StoreOpener-d5c7bcc8bff6e27703baa7934d9eea50-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d5c7bcc8bff6e27703baa7934d9eea50 columnFamilyName cf 2024-12-03T23:28:26,614 DEBUG [StoreOpener-d5c7bcc8bff6e27703baa7934d9eea50-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:28:26,614 DEBUG [StoreOpener-f91ebec0757a404e510cf50f61b4011d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:28:26,615 INFO [StoreOpener-d5c7bcc8bff6e27703baa7934d9eea50-1 {}] regionserver.HStore(327): Store=d5c7bcc8bff6e27703baa7934d9eea50/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T23:28:26,615 INFO [StoreOpener-f91ebec0757a404e510cf50f61b4011d-1 {}] regionserver.HStore(327): Store=f91ebec0757a404e510cf50f61b4011d/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T23:28:26,616 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1038): replaying wal for f91ebec0757a404e510cf50f61b4011d 2024-12-03T23:28:26,616 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, 
pid=77}] regionserver.HRegion(1038): replaying wal for d5c7bcc8bff6e27703baa7934d9eea50 2024-12-03T23:28:26,617 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportWithResetTtl/f91ebec0757a404e510cf50f61b4011d 2024-12-03T23:28:26,617 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportWithResetTtl/d5c7bcc8bff6e27703baa7934d9eea50 2024-12-03T23:28:26,617 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportWithResetTtl/f91ebec0757a404e510cf50f61b4011d 2024-12-03T23:28:26,617 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportWithResetTtl/d5c7bcc8bff6e27703baa7934d9eea50 2024-12-03T23:28:26,618 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1048): stopping wal replay for f91ebec0757a404e510cf50f61b4011d 2024-12-03T23:28:26,618 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1060): Cleaning up temporary data for f91ebec0757a404e510cf50f61b4011d 2024-12-03T23:28:26,618 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1048): stopping wal replay for d5c7bcc8bff6e27703baa7934d9eea50 2024-12-03T23:28:26,618 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1060): Cleaning up temporary data for d5c7bcc8bff6e27703baa7934d9eea50 2024-12-03T23:28:26,620 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1093): writing seq id for f91ebec0757a404e510cf50f61b4011d 2024-12-03T23:28:26,621 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1093): writing seq id for d5c7bcc8bff6e27703baa7934d9eea50 2024-12-03T23:28:26,623 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportWithResetTtl/f91ebec0757a404e510cf50f61b4011d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T23:28:26,623 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportWithResetTtl/d5c7bcc8bff6e27703baa7934d9eea50/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T23:28:26,624 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1114): Opened d5c7bcc8bff6e27703baa7934d9eea50; next sequenceid=2; 
SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68177782, jitterRate=0.01592811942100525}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-03T23:28:26,624 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1114): Opened f91ebec0757a404e510cf50f61b4011d; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68906468, jitterRate=0.026786386966705322}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-03T23:28:26,624 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1122): Running coprocessor post-open hooks for d5c7bcc8bff6e27703baa7934d9eea50 2024-12-03T23:28:26,624 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1122): Running coprocessor post-open hooks for f91ebec0757a404e510cf50f61b4011d 2024-12-03T23:28:26,625 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1006): Region open journal for f91ebec0757a404e510cf50f61b4011d: Running coprocessor pre-open hook at 1733268506608Writing region info on filesystem at 1733268506609 (+1 ms)Initializing all the Stores at 1733268506610 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733268506610Cleaning up temporary data from old regions at 1733268506618 (+8 ms)Running coprocessor post-open hooks at 1733268506624 (+6 ms)Region opened successfully at 1733268506625 (+1 ms) 2024-12-03T23:28:26,625 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1006): Region open journal for d5c7bcc8bff6e27703baa7934d9eea50: Running coprocessor pre-open hook at 1733268506608Writing region info on filesystem at 1733268506609 (+1 ms)Initializing all the Stores at 1733268506610 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733268506610Cleaning up temporary data from old regions at 1733268506618 (+8 ms)Running coprocessor post-open hooks at 1733268506624 (+6 ms)Region opened successfully at 1733268506625 (+1 ms) 2024-12-03T23:28:26,626 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportWithResetTtl,1,1733268506243.f91ebec0757a404e510cf50f61b4011d., pid=78, masterSystemTime=1733268506603 2024-12-03T23:28:26,626 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportWithResetTtl,,1733268506243.d5c7bcc8bff6e27703baa7934d9eea50., pid=77, 
masterSystemTime=1733268506602 2024-12-03T23:28:26,628 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportWithResetTtl,1,1733268506243.f91ebec0757a404e510cf50f61b4011d. 2024-12-03T23:28:26,628 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] handler.AssignRegionHandler(153): Opened testExportWithResetTtl,1,1733268506243.f91ebec0757a404e510cf50f61b4011d. 2024-12-03T23:28:26,628 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=76 updating hbase:meta row=f91ebec0757a404e510cf50f61b4011d, regionState=OPEN, openSeqNum=2, regionLocation=ed8014902436,37351,1733268370600 2024-12-03T23:28:26,629 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportWithResetTtl,,1733268506243.d5c7bcc8bff6e27703baa7934d9eea50. 2024-12-03T23:28:26,629 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] handler.AssignRegionHandler(153): Opened testExportWithResetTtl,,1733268506243.d5c7bcc8bff6e27703baa7934d9eea50. 2024-12-03T23:28:26,629 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=75 updating hbase:meta row=d5c7bcc8bff6e27703baa7934d9eea50, regionState=OPEN, openSeqNum=2, regionLocation=ed8014902436,41121,1733268370525 2024-12-03T23:28:26,630 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=78, ppid=76, state=RUNNABLE, hasLock=false; OpenRegionProcedure f91ebec0757a404e510cf50f61b4011d, server=ed8014902436,37351,1733268370600 because future has completed 2024-12-03T23:28:26,631 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=77, ppid=75, state=RUNNABLE, hasLock=false; OpenRegionProcedure d5c7bcc8bff6e27703baa7934d9eea50, server=ed8014902436,41121,1733268370525 because future has completed 2024-12-03T23:28:26,633 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=78, resume processing ppid=76 2024-12-03T23:28:26,633 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=78, ppid=76, state=SUCCESS, hasLock=false; OpenRegionProcedure f91ebec0757a404e510cf50f61b4011d, server=ed8014902436,37351,1733268370600 in 180 msec 2024-12-03T23:28:26,633 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=77, resume processing ppid=75 2024-12-03T23:28:26,633 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=77, ppid=75, state=SUCCESS, hasLock=false; OpenRegionProcedure d5c7bcc8bff6e27703baa7934d9eea50, server=ed8014902436,41121,1733268370525 in 183 msec 2024-12-03T23:28:26,634 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=76, ppid=74, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=f91ebec0757a404e510cf50f61b4011d, ASSIGN in 340 msec 2024-12-03T23:28:26,635 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=75, resume processing ppid=74 2024-12-03T23:28:26,635 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=75, ppid=74, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=d5c7bcc8bff6e27703baa7934d9eea50, ASSIGN in 340 msec 2024-12-03T23:28:26,636 INFO [PEWorker-5 {}] 
procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-03T23:28:26,636 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733268506636"}]},"ts":"1733268506636"} 2024-12-03T23:28:26,637 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=ENABLED in hbase:meta 2024-12-03T23:28:26,638 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_POST_OPERATION 2024-12-03T23:28:26,638 DEBUG [PEWorker-5 {}] access.PermissionStorage(177): Writing permission with rowKey testExportWithResetTtl jenkins: RWXCA 2024-12-03T23:28:26,641 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37351 {}] access.PermissionStorage(613): Read acl: entry[testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-03T23:28:26,706 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:28:26,706 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:28:26,706 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:28:26,706 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:28:26,750 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-03T23:28:26,750 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-03T23:28:26,750 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-03T23:28:26,751 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-03T23:28:26,751 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data 
PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-03T23:28:26,751 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-03T23:28:26,751 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-03T23:28:26,751 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-03T23:28:26,752 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=74, state=SUCCESS, hasLock=false; CreateTableProcedure table=testExportWithResetTtl in 506 msec 2024-12-03T23:28:26,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=74 2024-12-03T23:28:26,880 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testExportWithResetTtl completed 2024-12-03T23:28:26,880 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-03T23:28:26,882 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportWithResetTtl 2024-12-03T23:28:26,882 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportWithResetTtl,,1733268506243.d5c7bcc8bff6e27703baa7934d9eea50. 2024-12-03T23:28:26,882 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-03T23:28:26,884 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-03T23:28:26,888 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-03T23:28:26,894 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-03T23:28:26,903 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41121 {}] regionserver.HRegion(8528): writing data to region testExportWithResetTtl,,1733268506243.d5c7bcc8bff6e27703baa7934d9eea50. with WAL disabled. Data may be lost in the event of a crash. 2024-12-03T23:28:26,906 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37351 {}] regionserver.HRegion(8528): writing data to region testExportWithResetTtl,1,1733268506243.f91ebec0757a404e510cf50f61b4011d. with WAL disabled. Data may be lost in the event of a crash. 
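The "writing data to region ... with WAL disabled" warnings above are what a region server logs when client writes arrive with write-ahead logging skipped. A minimal sketch of such a write is shown below; the table name matches the log, while the row, qualifier, and value are made up for illustration.

// Illustrative sketch only: a put with durability SKIP_WAL, which produces the
// "Data may be lost in the event of a crash" warning seen in the log above.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPutSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("testExportWithResetTtl"))) {
      Put put = new Put(Bytes.toBytes("row-0"));
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      // Skip the write-ahead log: faster, but the edit is lost if the region
      // server crashes before the memstore is flushed -- hence the warning.
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
    }
  }
}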
2024-12-03T23:28:26,907 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-03T23:28:26,910 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportWithResetTtl 2024-12-03T23:28:26,910 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportWithResetTtl,,1733268506243.d5c7bcc8bff6e27703baa7934d9eea50. 2024-12-03T23:28:26,910 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-03T23:28:26,912 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-03T23:28:26,917 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-03T23:28:26,924 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-03T23:28:26,926 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } 2024-12-03T23:28:26,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733268506927 (current time:1733268506927). 
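A client-side call roughly equivalent to the FLUSH snapshot request logged just above might look like the sketch below. The ttl=100000 in the request is a snapshot property (snapshot TTL); the plain Admin overload shown here does not set it, and the property-passing variant is deliberately omitted rather than guessed at.

// Illustrative sketch only: taking a flush-type snapshot of the table created
// earlier in this log. Connection setup is assumed; TTL is not set here.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;

public class FlushSnapshotSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.snapshot("snaptb-testExportWithResetTtl",
          TableName.valueOf("testExportWithResetTtl"),
          SnapshotType.FLUSH);
    }
  }
}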
2024-12-03T23:28:26,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb-testExportWithResetTtl VERSION not specified, setting to 2 2024-12-03T23:28:26,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-03T23:28:26,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@a88b5b3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:28:26,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.ClusterIdFetcher(90): Going to request ed8014902436,43753,-1 for getting cluster id 2024-12-03T23:28:26,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T23:28:26,928 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '365bf15c-9cbe-4838-a767-ac7274dcdbe4' 2024-12-03T23:28:26,928 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T23:28:26,928 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "365bf15c-9cbe-4838-a767-ac7274dcdbe4" 2024-12-03T23:28:26,928 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@32709bf9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:28:26,928 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [ed8014902436,43753,-1] 2024-12-03T23:28:26,928 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T23:28:26,929 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:28:26,929 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43424, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T23:28:26,930 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47a6e95d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:28:26,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T23:28:26,931 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ed8014902436,37351,1733268370600, seqNum=-1] 2024-12-03T23:28:26,931 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T23:28:26,932 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52046, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T23:28:26,933 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753. 2024-12-03T23:28:26,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-03T23:28:26,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:28:26,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:28:26,933 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-03T23:28:26,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4d06852c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:28:26,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.ClusterIdFetcher(90): Going to request ed8014902436,43753,-1 for getting cluster id 2024-12-03T23:28:26,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T23:28:26,935 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '365bf15c-9cbe-4838-a767-ac7274dcdbe4' 2024-12-03T23:28:26,935 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T23:28:26,935 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "365bf15c-9cbe-4838-a767-ac7274dcdbe4" 2024-12-03T23:28:26,935 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@769688, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:28:26,935 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [ed8014902436,43753,-1] 2024-12-03T23:28:26,936 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T23:28:26,936 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:28:26,936 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43442, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T23:28:26,938 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7560bb86, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:28:26,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T23:28:26,939 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ed8014902436,37351,1733268370600, seqNum=-1] 2024-12-03T23:28:26,939 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T23:28:26,940 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52054, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-03T23:28:26,942 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1733268373530.94ece278d222af9bae70064026dc24ae., hostname=ed8014902436,37351,1733268370600, seqNum=2] 2024-12-03T23:28:26,944 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753. 2024-12-03T23:28:26,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-03T23:28:26,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:28:26,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:28:26,945 INFO [Registry-endpoints-refresh-end-points {}] 
client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-03T23:28:26,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] access.PermissionStorage(613): Read acl: entry[testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-03T23:28:26,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-03T23:28:26,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure2.ProcedureExecutor(1139): Stored pid=79, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } 2024-12-03T23:28:26,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, snapshot procedure id = 79 2024-12-03T23:28:26,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=79 2024-12-03T23:28:26,947 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_PREPARE 2024-12-03T23:28:26,948 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-03T23:28:26,950 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-03T23:28:26,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741988_1164 (size=143) 2024-12-03T23:28:26,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741988_1164 (size=143) 2024-12-03T23:28:26,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741988_1164 (size=143) 2024-12-03T23:28:26,958 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-03T23:28:26,959 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=80, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d5c7bcc8bff6e27703baa7934d9eea50}, {pid=81, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f91ebec0757a404e510cf50f61b4011d}] 2024-12-03T23:28:26,960 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for 
pid=81, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f91ebec0757a404e510cf50f61b4011d 2024-12-03T23:28:26,960 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=80, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d5c7bcc8bff6e27703baa7934d9eea50 2024-12-03T23:28:27,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=79 2024-12-03T23:28:27,112 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41121 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=80 2024-12-03T23:28:27,112 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37351 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=81 2024-12-03T23:28:27,112 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportWithResetTtl,,1733268506243.d5c7bcc8bff6e27703baa7934d9eea50. 2024-12-03T23:28:27,112 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportWithResetTtl,1,1733268506243.f91ebec0757a404e510cf50f61b4011d. 2024-12-03T23:28:27,113 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HRegion(2902): Flushing d5c7bcc8bff6e27703baa7934d9eea50 1/1 column families, dataSize=132 B heapSize=544 B 2024-12-03T23:28:27,113 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HRegion(2902): Flushing f91ebec0757a404e510cf50f61b4011d 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-12-03T23:28:27,133 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203e9bacaba99e44b21a8b791e643f24c21_d5c7bcc8bff6e27703baa7934d9eea50 is 71, key is 0270af051da587aab3ede9c7c148cf7d/cf:q/1733268506903/Put/seqid=0 2024-12-03T23:28:27,144 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b2024120302767d2db3b74c339d142cb1e82fe113_f91ebec0757a404e510cf50f61b4011d is 71, key is 1b77e449343554b81358cf4d9b5c6d47/cf:q/1733268506906/Put/seqid=0 2024-12-03T23:28:27,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741989_1165 (size=5032) 2024-12-03T23:28:27,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741989_1165 (size=5032) 2024-12-03T23:28:27,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741989_1165 (size=5032) 2024-12-03T23:28:27,148 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:28:27,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741990_1166 (size=8241) 2024-12-03T23:28:27,154 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203e9bacaba99e44b21a8b791e643f24c21_d5c7bcc8bff6e27703baa7934d9eea50 to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/d41d8cd98f00b204e9800998ecf8427e20241203e9bacaba99e44b21a8b791e643f24c21_d5c7bcc8bff6e27703baa7934d9eea50 2024-12-03T23:28:27,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741990_1166 (size=8241) 2024-12-03T23:28:27,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741990_1166 (size=8241) 2024-12-03T23:28:27,155 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:28:27,155 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportWithResetTtl/d5c7bcc8bff6e27703baa7934d9eea50/.tmp/cf/dd4dc44a03c94437a0d9f6aaad329e44, store: [table=testExportWithResetTtl family=cf region=d5c7bcc8bff6e27703baa7934d9eea50] 2024-12-03T23:28:27,156 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportWithResetTtl/d5c7bcc8bff6e27703baa7934d9eea50/.tmp/cf/dd4dc44a03c94437a0d9f6aaad329e44 is 199, key is 0388f040acc2210831f2f8ae652f1ffdd/cf:q/1733268506903/Put/seqid=0 2024-12-03T23:28:27,160 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b2024120302767d2db3b74c339d142cb1e82fe113_f91ebec0757a404e510cf50f61b4011d to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/c4ca4238a0b923820dcc509a6f75849b2024120302767d2db3b74c339d142cb1e82fe113_f91ebec0757a404e510cf50f61b4011d 2024-12-03T23:28:27,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741991_1167 (size=5679) 2024-12-03T23:28:27,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741991_1167 
(size=5679) 2024-12-03T23:28:27,162 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportWithResetTtl/f91ebec0757a404e510cf50f61b4011d/.tmp/cf/dba5bd9c73114de1a483a6312e8330d2, store: [table=testExportWithResetTtl family=cf region=f91ebec0757a404e510cf50f61b4011d] 2024-12-03T23:28:27,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741991_1167 (size=5679) 2024-12-03T23:28:27,162 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportWithResetTtl/f91ebec0757a404e510cf50f61b4011d/.tmp/cf/dba5bd9c73114de1a483a6312e8330d2 is 199, key is 1c101a7c003768fe31f74910ed0722473/cf:q/1733268506906/Put/seqid=0 2024-12-03T23:28:27,162 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=5, memsize=132, hasBloomFilter=true, into tmp file hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportWithResetTtl/d5c7bcc8bff6e27703baa7934d9eea50/.tmp/cf/dd4dc44a03c94437a0d9f6aaad329e44 2024-12-03T23:28:27,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741992_1168 (size=14712) 2024-12-03T23:28:27,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741992_1168 (size=14712) 2024-12-03T23:28:27,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741992_1168 (size=14712) 2024-12-03T23:28:27,170 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=5, memsize=3.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportWithResetTtl/f91ebec0757a404e510cf50f61b4011d/.tmp/cf/dba5bd9c73114de1a483a6312e8330d2 2024-12-03T23:28:27,177 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportWithResetTtl/d5c7bcc8bff6e27703baa7934d9eea50/.tmp/cf/dd4dc44a03c94437a0d9f6aaad329e44 as hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportWithResetTtl/d5c7bcc8bff6e27703baa7934d9eea50/cf/dd4dc44a03c94437a0d9f6aaad329e44 2024-12-03T23:28:27,183 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportWithResetTtl/d5c7bcc8bff6e27703baa7934d9eea50/cf/dd4dc44a03c94437a0d9f6aaad329e44, entries=2, sequenceid=5, filesize=5.5 K 2024-12-03T23:28:27,184 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 
{event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HRegion(3140): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for d5c7bcc8bff6e27703baa7934d9eea50 in 71ms, sequenceid=5, compaction requested=false 2024-12-03T23:28:27,184 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportWithResetTtl' 2024-12-03T23:28:27,184 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportWithResetTtl/f91ebec0757a404e510cf50f61b4011d/.tmp/cf/dba5bd9c73114de1a483a6312e8330d2 as hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportWithResetTtl/f91ebec0757a404e510cf50f61b4011d/cf/dba5bd9c73114de1a483a6312e8330d2 2024-12-03T23:28:27,184 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HRegion(2603): Flush status journal for d5c7bcc8bff6e27703baa7934d9eea50: 2024-12-03T23:28:27,184 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportWithResetTtl,,1733268506243.d5c7bcc8bff6e27703baa7934d9eea50. for snaptb-testExportWithResetTtl completed. 2024-12-03T23:28:27,184 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] snapshot.SnapshotManifest(241): Storing 'testExportWithResetTtl,,1733268506243.d5c7bcc8bff6e27703baa7934d9eea50.' 
region-info for snapshot=snaptb-testExportWithResetTtl 2024-12-03T23:28:27,184 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-03T23:28:27,185 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportWithResetTtl/d5c7bcc8bff6e27703baa7934d9eea50/cf/dd4dc44a03c94437a0d9f6aaad329e44] hfiles 2024-12-03T23:28:27,185 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportWithResetTtl/d5c7bcc8bff6e27703baa7934d9eea50/cf/dd4dc44a03c94437a0d9f6aaad329e44 for snapshot=snaptb-testExportWithResetTtl 2024-12-03T23:28:27,190 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportWithResetTtl/f91ebec0757a404e510cf50f61b4011d/cf/dba5bd9c73114de1a483a6312e8330d2, entries=48, sequenceid=5, filesize=14.4 K 2024-12-03T23:28:27,191 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HRegion(3140): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for f91ebec0757a404e510cf50f61b4011d in 78ms, sequenceid=5, compaction requested=false 2024-12-03T23:28:27,191 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HRegion(2603): Flush status journal for f91ebec0757a404e510cf50f61b4011d: 2024-12-03T23:28:27,191 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportWithResetTtl,1,1733268506243.f91ebec0757a404e510cf50f61b4011d. for snaptb-testExportWithResetTtl completed. 2024-12-03T23:28:27,192 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] snapshot.SnapshotManifest(241): Storing 'testExportWithResetTtl,1,1733268506243.f91ebec0757a404e510cf50f61b4011d.' 
region-info for snapshot=snaptb-testExportWithResetTtl 2024-12-03T23:28:27,192 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-03T23:28:27,192 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportWithResetTtl/f91ebec0757a404e510cf50f61b4011d/cf/dba5bd9c73114de1a483a6312e8330d2] hfiles 2024-12-03T23:28:27,192 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportWithResetTtl/f91ebec0757a404e510cf50f61b4011d/cf/dba5bd9c73114de1a483a6312e8330d2 for snapshot=snaptb-testExportWithResetTtl 2024-12-03T23:28:27,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741993_1169 (size=100) 2024-12-03T23:28:27,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741993_1169 (size=100) 2024-12-03T23:28:27,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741993_1169 (size=100) 2024-12-03T23:28:27,198 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportWithResetTtl,,1733268506243.d5c7bcc8bff6e27703baa7934d9eea50. 
2024-12-03T23:28:27,198 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=80 2024-12-03T23:28:27,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.HMaster(4169): Remote procedure done, pid=80 2024-12-03T23:28:27,198 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb-testExportWithResetTtl on region d5c7bcc8bff6e27703baa7934d9eea50 2024-12-03T23:28:27,199 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=80, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d5c7bcc8bff6e27703baa7934d9eea50 2024-12-03T23:28:27,201 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=80, ppid=79, state=SUCCESS, hasLock=false; SnapshotRegionProcedure d5c7bcc8bff6e27703baa7934d9eea50 in 241 msec 2024-12-03T23:28:27,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741994_1170 (size=100) 2024-12-03T23:28:27,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741994_1170 (size=100) 2024-12-03T23:28:27,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741994_1170 (size=100) 2024-12-03T23:28:27,202 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportWithResetTtl,1,1733268506243.f91ebec0757a404e510cf50f61b4011d. 
2024-12-03T23:28:27,202 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=81 2024-12-03T23:28:27,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.HMaster(4169): Remote procedure done, pid=81 2024-12-03T23:28:27,202 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb-testExportWithResetTtl on region f91ebec0757a404e510cf50f61b4011d 2024-12-03T23:28:27,202 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=81, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f91ebec0757a404e510cf50f61b4011d 2024-12-03T23:28:27,205 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=81, resume processing ppid=79 2024-12-03T23:28:27,205 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=81, ppid=79, state=SUCCESS, hasLock=false; SnapshotRegionProcedure f91ebec0757a404e510cf50f61b4011d in 245 msec 2024-12-03T23:28:27,205 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-03T23:28:27,205 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-03T23:28:27,206 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-03T23:28:27,206 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-03T23:28:27,206 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:28:27,208 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/c4ca4238a0b923820dcc509a6f75849b2024120302767d2db3b74c339d142cb1e82fe113_f91ebec0757a404e510cf50f61b4011d, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/d41d8cd98f00b204e9800998ecf8427e20241203e9bacaba99e44b21a8b791e643f24c21_d5c7bcc8bff6e27703baa7934d9eea50] hfiles 2024-12-03T23:28:27,208 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/c4ca4238a0b923820dcc509a6f75849b2024120302767d2db3b74c339d142cb1e82fe113_f91ebec0757a404e510cf50f61b4011d 2024-12-03T23:28:27,208 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/d41d8cd98f00b204e9800998ecf8427e20241203e9bacaba99e44b21a8b791e643f24c21_d5c7bcc8bff6e27703baa7934d9eea50 2024-12-03T23:28:27,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741995_1171 (size=284) 2024-12-03T23:28:27,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741995_1171 (size=284) 2024-12-03T23:28:27,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741995_1171 (size=284) 2024-12-03T23:28:27,219 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-03T23:28:27,220 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb-testExportWithResetTtl 2024-12-03T23:28:27,221 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-12-03T23:28:27,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741996_1172 (size=923) 2024-12-03T23:28:27,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741996_1172 (size=923) 2024-12-03T23:28:27,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:37061 is added to blk_1073741996_1172 (size=923) 2024-12-03T23:28:27,247 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-03T23:28:27,254 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-03T23:28:27,255 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-12-03T23:28:27,256 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_POST_OPERATION 2024-12-03T23:28:27,256 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, snapshot procedure id = 79 2024-12-03T23:28:27,258 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=79, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } in 310 msec 2024-12-03T23:28:27,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=79 2024-12-03T23:28:27,259 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testExportWithResetTtl completed 2024-12-03T23:28:27,271 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268507270 2024-12-03T23:28:27,271 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:37331, tgtDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268507270, rawTgtDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268507270, srcFsUri=hdfs://localhost:37331, srcDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b 2024-12-03T23:28:27,305 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:37331, inputRoot=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b 2024-12-03T23:28:27,305 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-919726820_22, ugi=jenkins (auth:SIMPLE)]], 
outputRoot=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268507270, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268507270/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-12-03T23:28:27,307 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-03T23:28:27,312 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/snaptb-testExportWithResetTtl to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268507270/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-12-03T23:28:27,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741998_1174 (size=143) 2024-12-03T23:28:27,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741998_1174 (size=143) 2024-12-03T23:28:27,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741997_1173 (size=923) 2024-12-03T23:28:27,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741997_1173 (size=923) 2024-12-03T23:28:27,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741997_1173 (size=923) 2024-12-03T23:28:27,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741998_1174 (size=143) 2024-12-03T23:28:27,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741999_1175 (size=141) 2024-12-03T23:28:27,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741999_1175 (size=141) 2024-12-03T23:28:27,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741999_1175 (size=141) 2024-12-03T23:28:27,339 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:28:27,339 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:28:27,339 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:28:27,663 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0002_000001 (auth:SIMPLE) from 127.0.0.1:55878 2024-12-03T23:28:27,683 WARN [ContainersLauncher #0 {}] 
nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_3/usercache/jenkins/appcache/application_1733268377251_0002/container_1733268377251_0002_01_000001/launch_container.sh] 2024-12-03T23:28:27,683 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_3/usercache/jenkins/appcache/application_1733268377251_0002/container_1733268377251_0002_01_000001/container_tokens] 2024-12-03T23:28:27,683 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_3/usercache/jenkins/appcache/application_1733268377251_0002/container_1733268377251_0002_01_000001/sysfs] 2024-12-03T23:28:28,210 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/hadoop-17963077341387913869.jar 2024-12-03T23:28:28,211 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:28:28,211 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:28:28,265 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/hadoop-3169575315343302002.jar 2024-12-03T23:28:28,265 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:28:28,266 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:28:28,266 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:28:28,266 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 
2024-12-03T23:28:28,266 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:28:28,266 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:28:28,267 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-03T23:28:28,267 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-03T23:28:28,267 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-03T23:28:28,267 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-03T23:28:28,267 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-03T23:28:28,267 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-03T23:28:28,268 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-03T23:28:28,268 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-03T23:28:28,268 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-03T23:28:28,268 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-03T23:28:28,268 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-03T23:28:28,269 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-03T23:28:28,269 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-03T23:28:28,269 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-03T23:28:28,269 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-03T23:28:28,269 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-03T23:28:28,269 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-03T23:28:28,270 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-03T23:28:28,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742000_1176 (size=131440) 2024-12-03T23:28:28,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742000_1176 (size=131440) 2024-12-03T23:28:28,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742000_1176 (size=131440) 2024-12-03T23:28:28,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742001_1177 (size=4188619) 2024-12-03T23:28:28,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742001_1177 (size=4188619) 2024-12-03T23:28:28,339 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742001_1177 (size=4188619) 2024-12-03T23:28:28,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742002_1178 (size=1323991) 2024-12-03T23:28:28,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742002_1178 (size=1323991) 2024-12-03T23:28:28,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742002_1178 (size=1323991) 2024-12-03T23:28:28,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742003_1179 (size=903924) 2024-12-03T23:28:28,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742003_1179 (size=903924) 2024-12-03T23:28:28,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742003_1179 (size=903924) 2024-12-03T23:28:28,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742004_1180 (size=8360360) 2024-12-03T23:28:28,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742004_1180 (size=8360360) 2024-12-03T23:28:28,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742004_1180 (size=8360360) 2024-12-03T23:28:28,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742005_1181 (size=1877034) 2024-12-03T23:28:28,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742005_1181 (size=1877034) 2024-12-03T23:28:28,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742005_1181 (size=1877034) 2024-12-03T23:28:28,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742006_1182 (size=77835) 2024-12-03T23:28:28,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742006_1182 (size=77835) 2024-12-03T23:28:28,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742006_1182 (size=77835) 2024-12-03T23:28:28,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742007_1183 (size=30949) 2024-12-03T23:28:28,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742007_1183 (size=30949) 2024-12-03T23:28:28,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742007_1183 (size=30949) 2024-12-03T23:28:28,684 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-03T23:28:28,849 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742008_1184 (size=1597213) 2024-12-03T23:28:28,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742008_1184 (size=1597213) 2024-12-03T23:28:28,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742008_1184 (size=1597213) 2024-12-03T23:28:28,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742009_1185 (size=4695811) 2024-12-03T23:28:28,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742009_1185 (size=4695811) 2024-12-03T23:28:28,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742009_1185 (size=4695811) 2024-12-03T23:28:28,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742010_1186 (size=232957) 2024-12-03T23:28:28,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742010_1186 (size=232957) 2024-12-03T23:28:28,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742010_1186 (size=232957) 2024-12-03T23:28:28,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742011_1187 (size=443172) 2024-12-03T23:28:28,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742011_1187 (size=443172) 2024-12-03T23:28:28,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742011_1187 (size=443172) 2024-12-03T23:28:28,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742012_1188 (size=127628) 2024-12-03T23:28:28,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742012_1188 (size=127628) 2024-12-03T23:28:28,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742012_1188 (size=127628) 2024-12-03T23:28:28,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742013_1189 (size=20406) 2024-12-03T23:28:28,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742013_1189 (size=20406) 2024-12-03T23:28:28,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742013_1189 (size=20406) 2024-12-03T23:28:28,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742014_1190 (size=6424741) 2024-12-03T23:28:28,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742014_1190 (size=6424741) 2024-12-03T23:28:28,933 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742014_1190 (size=6424741) 2024-12-03T23:28:28,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742015_1191 (size=5175431) 2024-12-03T23:28:28,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742015_1191 (size=5175431) 2024-12-03T23:28:28,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742015_1191 (size=5175431) 2024-12-03T23:28:28,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742016_1192 (size=217634) 2024-12-03T23:28:28,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742016_1192 (size=217634) 2024-12-03T23:28:28,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742016_1192 (size=217634) 2024-12-03T23:28:28,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742017_1193 (size=1832290) 2024-12-03T23:28:28,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742017_1193 (size=1832290) 2024-12-03T23:28:28,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742017_1193 (size=1832290) 2024-12-03T23:28:28,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742018_1194 (size=322274) 2024-12-03T23:28:28,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742018_1194 (size=322274) 2024-12-03T23:28:28,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742018_1194 (size=322274) 2024-12-03T23:28:28,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742019_1195 (size=503880) 2024-12-03T23:28:28,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742019_1195 (size=503880) 2024-12-03T23:28:28,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742019_1195 (size=503880) 2024-12-03T23:28:29,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742020_1196 (size=29229) 2024-12-03T23:28:29,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742020_1196 (size=29229) 2024-12-03T23:28:29,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742020_1196 (size=29229) 2024-12-03T23:28:29,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742021_1197 (size=24096) 
2024-12-03T23:28:29,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742021_1197 (size=24096) 2024-12-03T23:28:29,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742021_1197 (size=24096) 2024-12-03T23:28:29,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742022_1198 (size=111872) 2024-12-03T23:28:29,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742022_1198 (size=111872) 2024-12-03T23:28:29,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742022_1198 (size=111872) 2024-12-03T23:28:29,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742023_1199 (size=45609) 2024-12-03T23:28:29,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742023_1199 (size=45609) 2024-12-03T23:28:29,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742023_1199 (size=45609) 2024-12-03T23:28:29,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742024_1200 (size=136454) 2024-12-03T23:28:29,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742024_1200 (size=136454) 2024-12-03T23:28:29,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742024_1200 (size=136454) 2024-12-03T23:28:29,030 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
2024-12-03T23:28:29,032 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb-testExportWithResetTtl' hfile list 2024-12-03T23:28:29,034 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=14.4 K 2024-12-03T23:28:29,034 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=8.0 K 2024-12-03T23:28:29,034 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=5.5 K 2024-12-03T23:28:29,034 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=4.9 K 2024-12-03T23:28:29,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742025_1201 (size=995) 2024-12-03T23:28:29,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742025_1201 (size=995) 2024-12-03T23:28:29,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742025_1201 (size=995) 2024-12-03T23:28:29,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742026_1202 (size=35) 2024-12-03T23:28:29,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742026_1202 (size=35) 2024-12-03T23:28:29,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742026_1202 (size=35) 2024-12-03T23:28:29,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742027_1203 (size=303992) 2024-12-03T23:28:29,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742027_1203 (size=303992) 2024-12-03T23:28:29,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742027_1203 (size=303992) 2024-12-03T23:28:29,074 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-03T23:28:29,074 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-03T23:28:29,506 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0003_000001 (auth:SIMPLE) from 127.0.0.1:51910 2024-12-03T23:28:29,917 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl 2024-12-03T23:28:29,918 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl Metrics about Tables on a single HBase RegionServer 2024-12-03T23:28:29,918 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl 2024-12-03T23:28:29,918 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl Metrics about Tables on a single HBase RegionServer 2024-12-03T23:28:29,919 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName 2024-12-03T23:28:34,802 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0003_000001 (auth:SIMPLE) from 127.0.0.1:39028 2024-12-03T23:28:35,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742028_1204 (size=349690) 2024-12-03T23:28:35,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742028_1204 (size=349690) 2024-12-03T23:28:35,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742028_1204 (size=349690) 2024-12-03T23:28:35,421 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-03T23:28:37,118 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0003_000001 (auth:SIMPLE) from 127.0.0.1:56794 2024-12-03T23:28:37,118 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0003_000001 (auth:SIMPLE) from 127.0.0.1:50896 2024-12-03T23:28:37,909 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0003_000001 (auth:SIMPLE) from 127.0.0.1:56808 2024-12-03T23:28:37,937 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0003_000001 (auth:SIMPLE) from 127.0.0.1:50898 2024-12-03T23:28:38,448 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-03T23:28:40,670 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733268377251_0003_01_000006 while processing FINISH_CONTAINERS event 2024-12-03T23:28:41,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742029_1205 (size=14712) 2024-12-03T23:28:41,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742029_1205 (size=14712) 2024-12-03T23:28:41,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742029_1205 (size=14712) 2024-12-03T23:28:43,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742031_1207 (size=8241) 2024-12-03T23:28:43,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742031_1207 (size=8241) 2024-12-03T23:28:43,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742031_1207 (size=8241) 2024-12-03T23:28:44,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742032_1208 (size=5032) 2024-12-03T23:28:44,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742032_1208 (size=5032) 2024-12-03T23:28:44,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742032_1208 (size=5032) 2024-12-03T23:28:44,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742033_1209 (size=5679) 2024-12-03T23:28:44,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742033_1209 (size=5679) 2024-12-03T23:28:44,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742033_1209 (size=5679) 2024-12-03T23:28:44,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742030_1206 (size=31710) 2024-12-03T23:28:44,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742030_1206 (size=31710) 2024-12-03T23:28:44,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742030_1206 (size=31710) 2024-12-03T23:28:44,455 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_0/usercache/jenkins/appcache/application_1733268377251_0003/container_1733268377251_0003_01_000005/launch_container.sh] 2024-12-03T23:28:44,455 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_0/usercache/jenkins/appcache/application_1733268377251_0003/container_1733268377251_0003_01_000005/container_tokens] 2024-12-03T23:28:44,455 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_0/usercache/jenkins/appcache/application_1733268377251_0003/container_1733268377251_0003_01_000005/sysfs] 2024-12-03T23:28:44,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742034_1210 (size=462) 2024-12-03T23:28:44,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742034_1210 (size=462) 2024-12-03T23:28:44,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742034_1210 (size=462) 2024-12-03T23:28:44,519 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_1/usercache/jenkins/appcache/application_1733268377251_0003/container_1733268377251_0003_01_000004/launch_container.sh] 2024-12-03T23:28:44,519 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_1/usercache/jenkins/appcache/application_1733268377251_0003/container_1733268377251_0003_01_000004/container_tokens] 2024-12-03T23:28:44,519 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_1/usercache/jenkins/appcache/application_1733268377251_0003/container_1733268377251_0003_01_000004/sysfs] 2024-12-03T23:28:44,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742035_1211 (size=31710) 2024-12-03T23:28:44,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742035_1211 (size=31710) 2024-12-03T23:28:44,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742035_1211 (size=31710) 2024-12-03T23:28:44,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742036_1212 (size=349690) 2024-12-03T23:28:44,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742036_1212 (size=349690) 2024-12-03T23:28:44,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742036_1212 (size=349690) 
2024-12-03T23:28:44,624 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0003_000001 (auth:SIMPLE) from 127.0.0.1:48528 2024-12-03T23:28:44,640 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0003_000001 (auth:SIMPLE) from 127.0.0.1:34392 2024-12-03T23:28:44,667 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0003_000001 (auth:SIMPLE) from 127.0.0.1:48540 2024-12-03T23:28:46,268 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-03T23:28:46,269 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-12-03T23:28:46,288 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb-testExportWithResetTtl 2024-12-03T23:28:46,289 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-03T23:28:46,289 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-03T23:28:46,289 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-919726820_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/snaptb-testExportWithResetTtl at hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-12-03T23:28:46,290 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/snaptb-testExportWithResetTtl/.snapshotinfo 2024-12-03T23:28:46,290 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/snaptb-testExportWithResetTtl/data.manifest 2024-12-03T23:28:46,290 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-919726820_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268507270/.hbase-snapshot/snaptb-testExportWithResetTtl at hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268507270/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-12-03T23:28:46,290 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268507270/.hbase-snapshot/snaptb-testExportWithResetTtl/.snapshotinfo 2024-12-03T23:28:46,290 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268507270/.hbase-snapshot/snaptb-testExportWithResetTtl/data.manifest 2024-12-03T23:28:46,303 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testExportWithResetTtl 2024-12-03T23:28:46,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure2.ProcedureExecutor(1139): Stored pid=82, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testExportWithResetTtl 
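Context for the entries above (not part of the captured output): the run that reports "Export Completed: snaptb-testExportWithResetTtl" is driven by the org.apache.hadoop.hbase.snapshot.ExportSnapshot tool, which copies the snapshot manifest and referenced HFiles to another filesystem via a MapReduce job and then verifies the result. A minimal sketch of an equivalent invocation follows; the destination URI and mapper count are illustrative, not taken from this run, and the TTL-reset behaviour exercised by this particular test is an additional ExportSnapshot option not shown here.

    // Roughly equivalent CLI form:
    //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
    //     -snapshot snaptb-testExportWithResetTtl -copy-to hdfs://<dest> -mappers 4
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Destination path and mapper count are placeholders for illustration only.
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb-testExportWithResetTtl",
            "-copy-to", "hdfs://namenode:8020/exported-snapshots",
            "-mappers", "4"
        });
        System.exit(rc);
      }
    }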
2024-12-03T23:28:46,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=82 2024-12-03T23:28:46,310 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733268526310"}]},"ts":"1733268526310"} 2024-12-03T23:28:46,312 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=DISABLING in hbase:meta 2024-12-03T23:28:46,312 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testExportWithResetTtl to state=DISABLING 2024-12-03T23:28:46,313 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=83, ppid=82, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testExportWithResetTtl}] 2024-12-03T23:28:46,316 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=d5c7bcc8bff6e27703baa7934d9eea50, UNASSIGN}, {pid=85, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=f91ebec0757a404e510cf50f61b4011d, UNASSIGN}] 2024-12-03T23:28:46,317 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=85, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=f91ebec0757a404e510cf50f61b4011d, UNASSIGN 2024-12-03T23:28:46,317 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=84, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=d5c7bcc8bff6e27703baa7934d9eea50, UNASSIGN 2024-12-03T23:28:46,318 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=85 updating hbase:meta row=f91ebec0757a404e510cf50f61b4011d, regionState=CLOSING, regionLocation=ed8014902436,37351,1733268370600 2024-12-03T23:28:46,319 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=84 updating hbase:meta row=d5c7bcc8bff6e27703baa7934d9eea50, regionState=CLOSING, regionLocation=ed8014902436,41121,1733268370525 2024-12-03T23:28:46,321 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=85, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=f91ebec0757a404e510cf50f61b4011d, UNASSIGN because future has completed 2024-12-03T23:28:46,321 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-03T23:28:46,321 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=86, ppid=85, state=RUNNABLE, hasLock=false; CloseRegionProcedure f91ebec0757a404e510cf50f61b4011d, server=ed8014902436,37351,1733268370600}] 2024-12-03T23:28:46,323 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=84, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=d5c7bcc8bff6e27703baa7934d9eea50, UNASSIGN because future has completed 
2024-12-03T23:28:46,324 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-03T23:28:46,324 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=87, ppid=84, state=RUNNABLE, hasLock=false; CloseRegionProcedure d5c7bcc8bff6e27703baa7934d9eea50, server=ed8014902436,41121,1733268370525}] 2024-12-03T23:28:46,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=82 2024-12-03T23:28:46,477 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] handler.UnassignRegionHandler(122): Close d5c7bcc8bff6e27703baa7934d9eea50 2024-12-03T23:28:46,477 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] handler.UnassignRegionHandler(122): Close f91ebec0757a404e510cf50f61b4011d 2024-12-03T23:28:46,477 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-03T23:28:46,477 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-03T23:28:46,477 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1722): Closing f91ebec0757a404e510cf50f61b4011d, disabling compactions & flushes 2024-12-03T23:28:46,477 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1722): Closing d5c7bcc8bff6e27703baa7934d9eea50, disabling compactions & flushes 2024-12-03T23:28:46,477 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,1,1733268506243.f91ebec0757a404e510cf50f61b4011d. 2024-12-03T23:28:46,477 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,,1733268506243.d5c7bcc8bff6e27703baa7934d9eea50. 2024-12-03T23:28:46,477 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,1,1733268506243.f91ebec0757a404e510cf50f61b4011d. 2024-12-03T23:28:46,477 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,,1733268506243.d5c7bcc8bff6e27703baa7934d9eea50. 2024-12-03T23:28:46,477 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,1,1733268506243.f91ebec0757a404e510cf50f61b4011d. after waiting 0 ms 2024-12-03T23:28:46,477 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,,1733268506243.d5c7bcc8bff6e27703baa7934d9eea50. 
after waiting 0 ms 2024-12-03T23:28:46,477 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,1,1733268506243.f91ebec0757a404e510cf50f61b4011d. 2024-12-03T23:28:46,477 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,,1733268506243.d5c7bcc8bff6e27703baa7934d9eea50. 2024-12-03T23:28:46,482 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportWithResetTtl/f91ebec0757a404e510cf50f61b4011d/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-03T23:28:46,482 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportWithResetTtl/d5c7bcc8bff6e27703baa7934d9eea50/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-03T23:28:46,482 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-03T23:28:46,482 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-03T23:28:46,483 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1973): Closed testExportWithResetTtl,,1733268506243.d5c7bcc8bff6e27703baa7934d9eea50. 2024-12-03T23:28:46,483 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1973): Closed testExportWithResetTtl,1,1733268506243.f91ebec0757a404e510cf50f61b4011d. 
2024-12-03T23:28:46,483 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1676): Region close journal for d5c7bcc8bff6e27703baa7934d9eea50: Waiting for close lock at 1733268526477Running coprocessor pre-close hooks at 1733268526477Disabling compacts and flushes for region at 1733268526477Disabling writes for close at 1733268526477Writing region close event to WAL at 1733268526478 (+1 ms)Running coprocessor post-close hooks at 1733268526482 (+4 ms)Closed at 1733268526483 (+1 ms) 2024-12-03T23:28:46,483 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1676): Region close journal for f91ebec0757a404e510cf50f61b4011d: Waiting for close lock at 1733268526477Running coprocessor pre-close hooks at 1733268526477Disabling compacts and flushes for region at 1733268526477Disabling writes for close at 1733268526477Writing region close event to WAL at 1733268526478 (+1 ms)Running coprocessor post-close hooks at 1733268526482 (+4 ms)Closed at 1733268526483 (+1 ms) 2024-12-03T23:28:46,485 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] handler.UnassignRegionHandler(157): Closed f91ebec0757a404e510cf50f61b4011d 2024-12-03T23:28:46,485 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=85 updating hbase:meta row=f91ebec0757a404e510cf50f61b4011d, regionState=CLOSED 2024-12-03T23:28:46,485 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] handler.UnassignRegionHandler(157): Closed d5c7bcc8bff6e27703baa7934d9eea50 2024-12-03T23:28:46,486 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=84 updating hbase:meta row=d5c7bcc8bff6e27703baa7934d9eea50, regionState=CLOSED 2024-12-03T23:28:46,488 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=86, ppid=85, state=RUNNABLE, hasLock=false; CloseRegionProcedure f91ebec0757a404e510cf50f61b4011d, server=ed8014902436,37351,1733268370600 because future has completed 2024-12-03T23:28:46,489 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=87, ppid=84, state=RUNNABLE, hasLock=false; CloseRegionProcedure d5c7bcc8bff6e27703baa7934d9eea50, server=ed8014902436,41121,1733268370525 because future has completed 2024-12-03T23:28:46,490 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=86, resume processing ppid=85 2024-12-03T23:28:46,491 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=86, ppid=85, state=SUCCESS, hasLock=false; CloseRegionProcedure f91ebec0757a404e510cf50f61b4011d, server=ed8014902436,37351,1733268370600 in 168 msec 2024-12-03T23:28:46,492 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=87, resume processing ppid=84 2024-12-03T23:28:46,492 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=85, ppid=83, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=f91ebec0757a404e510cf50f61b4011d, UNASSIGN in 176 msec 2024-12-03T23:28:46,492 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=87, ppid=84, state=SUCCESS, hasLock=false; CloseRegionProcedure d5c7bcc8bff6e27703baa7934d9eea50, server=ed8014902436,41121,1733268370525 in 166 msec 2024-12-03T23:28:46,495 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished 
subprocedure pid=84, resume processing ppid=83 2024-12-03T23:28:46,495 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=84, ppid=83, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=d5c7bcc8bff6e27703baa7934d9eea50, UNASSIGN in 177 msec 2024-12-03T23:28:46,497 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=83, resume processing ppid=82 2024-12-03T23:28:46,497 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=83, ppid=82, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testExportWithResetTtl in 182 msec 2024-12-03T23:28:46,499 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733268526499"}]},"ts":"1733268526499"} 2024-12-03T23:28:46,502 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=DISABLED in hbase:meta 2024-12-03T23:28:46,502 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testExportWithResetTtl to state=DISABLED 2024-12-03T23:28:46,505 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=82, state=SUCCESS, hasLock=false; DisableTableProcedure table=testExportWithResetTtl in 200 msec 2024-12-03T23:28:46,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=82 2024-12-03T23:28:46,629 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testExportWithResetTtl completed 2024-12-03T23:28:46,630 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testExportWithResetTtl 2024-12-03T23:28:46,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure2.ProcedureExecutor(1139): Stored pid=88, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testExportWithResetTtl 2024-12-03T23:28:46,632 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=88, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-03T23:28:46,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] access.PermissionStorage(261): Removing permissions of removed table testExportWithResetTtl 2024-12-03T23:28:46,635 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=88, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-03T23:28:46,638 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37351 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testExportWithResetTtl 2024-12-03T23:28:46,641 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportWithResetTtl/d5c7bcc8bff6e27703baa7934d9eea50 2024-12-03T23:28:46,641 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportWithResetTtl/f91ebec0757a404e510cf50f61b4011d 2024-12-03T23:28:46,644 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(159): Archiving 
[FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportWithResetTtl/f91ebec0757a404e510cf50f61b4011d/cf, FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportWithResetTtl/f91ebec0757a404e510cf50f61b4011d/recovered.edits] 2024-12-03T23:28:46,647 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportWithResetTtl/d5c7bcc8bff6e27703baa7934d9eea50/cf, FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportWithResetTtl/d5c7bcc8bff6e27703baa7934d9eea50/recovered.edits] 2024-12-03T23:28:46,669 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportWithResetTtl/f91ebec0757a404e510cf50f61b4011d/cf/dba5bd9c73114de1a483a6312e8330d2 to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testExportWithResetTtl/f91ebec0757a404e510cf50f61b4011d/cf/dba5bd9c73114de1a483a6312e8330d2 2024-12-03T23:28:46,669 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportWithResetTtl/d5c7bcc8bff6e27703baa7934d9eea50/cf/dd4dc44a03c94437a0d9f6aaad329e44 to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testExportWithResetTtl/d5c7bcc8bff6e27703baa7934d9eea50/cf/dd4dc44a03c94437a0d9f6aaad329e44 2024-12-03T23:28:46,672 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportWithResetTtl/d5c7bcc8bff6e27703baa7934d9eea50/recovered.edits/8.seqid to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testExportWithResetTtl/d5c7bcc8bff6e27703baa7934d9eea50/recovered.edits/8.seqid 2024-12-03T23:28:46,672 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportWithResetTtl/f91ebec0757a404e510cf50f61b4011d/recovered.edits/8.seqid to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testExportWithResetTtl/f91ebec0757a404e510cf50f61b4011d/recovered.edits/8.seqid 2024-12-03T23:28:46,673 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportWithResetTtl/d5c7bcc8bff6e27703baa7934d9eea50 2024-12-03T23:28:46,673 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportWithResetTtl/f91ebec0757a404e510cf50f61b4011d 2024-12-03T23:28:46,673 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testExportWithResetTtl regions 2024-12-03T23:28:46,673 DEBUG [PEWorker-3 {}] backup.HFileArchiver(131): ARCHIVING 
hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d 2024-12-03T23:28:46,674 DEBUG [PEWorker-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf] 2024-12-03T23:28:46,692 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/c4ca4238a0b923820dcc509a6f75849b2024120302767d2db3b74c339d142cb1e82fe113_f91ebec0757a404e510cf50f61b4011d to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/c4ca4238a0b923820dcc509a6f75849b2024120302767d2db3b74c339d142cb1e82fe113_f91ebec0757a404e510cf50f61b4011d 2024-12-03T23:28:46,694 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/d41d8cd98f00b204e9800998ecf8427e20241203e9bacaba99e44b21a8b791e643f24c21_d5c7bcc8bff6e27703baa7934d9eea50 to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/d41d8cd98f00b204e9800998ecf8427e20241203e9bacaba99e44b21a8b791e643f24c21_d5c7bcc8bff6e27703baa7934d9eea50 2024-12-03T23:28:46,694 DEBUG [PEWorker-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d 2024-12-03T23:28:46,697 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=88, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-03T23:28:46,702 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testExportWithResetTtl from hbase:meta 2024-12-03T23:28:46,708 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-03T23:28:46,708 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-03T23:28:46,708 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-03T23:28:46,708 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-03T23:28:46,709 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions 
cache from testExportWithResetTtl with data PBUF 2024-12-03T23:28:46,709 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-03T23:28:46,709 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-03T23:28:46,709 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-03T23:28:46,710 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'testExportWithResetTtl' descriptor. 2024-12-03T23:28:46,711 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=88, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-03T23:28:46,711 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'testExportWithResetTtl' from region states. 2024-12-03T23:28:46,712 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportWithResetTtl,,1733268506243.d5c7bcc8bff6e27703baa7934d9eea50.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733268526711"}]},"ts":"9223372036854775807"} 2024-12-03T23:28:46,712 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportWithResetTtl,1,1733268506243.f91ebec0757a404e510cf50f61b4011d.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733268526711"}]},"ts":"9223372036854775807"} 2024-12-03T23:28:46,714 INFO [PEWorker-3 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-03T23:28:46,714 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => d5c7bcc8bff6e27703baa7934d9eea50, NAME => 'testExportWithResetTtl,,1733268506243.d5c7bcc8bff6e27703baa7934d9eea50.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => f91ebec0757a404e510cf50f61b4011d, NAME => 'testExportWithResetTtl,1,1733268506243.f91ebec0757a404e510cf50f61b4011d.', STARTKEY => '1', ENDKEY => ''}] 2024-12-03T23:28:46,714 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'testExportWithResetTtl' as deleted. 
2024-12-03T23:28:46,714 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733268526714"}]},"ts":"9223372036854775807"} 2024-12-03T23:28:46,716 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-03T23:28:46,716 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-03T23:28:46,716 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:28:46,716 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:28:46,716 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-03T23:28:46,716 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:28:46,717 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-03T23:28:46,717 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:28:46,718 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-03T23:28:46,718 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-03T23:28:46,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=88 2024-12-03T23:28:46,718 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(867): Deleted table testExportWithResetTtl state from META 2024-12-03T23:28:46,718 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 
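Context for the teardown entries above (not part of the captured output): the pid=82 through pid=88 procedure chain — DisableTableProcedure, CloseTableRegionsProcedure/TransitRegionStateProcedure, then DeleteTableProcedure with HFileArchiver moving region files under the archive directory — is what the master executes when a client disables and deletes a table. A minimal sketch of the equivalent client-side calls, assuming a reachable cluster configuration on the classpath (the class name is purely illustrative):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTableSketch {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("testExportWithResetTtl");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // A table must be disabled before it can be deleted; these two calls
          // correspond to the DISABLE and DELETE operations logged above.
          if (admin.tableExists(table)) {
            admin.disableTable(table);
            admin.deleteTable(table); // region and mob files end up under /archive, as logged
          }
        }
      }
    }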
2024-12-03T23:28:46,719 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-03T23:28:46,719 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=88, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-03T23:28:46,720 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=88, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testExportWithResetTtl in 89 msec 2024-12-03T23:28:46,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=88 2024-12-03T23:28:46,830 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testExportWithResetTtl 2024-12-03T23:28:46,830 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testExportWithResetTtl completed 2024-12-03T23:28:46,831 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportWithResetTtl 2024-12-03T23:28:46,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure2.ProcedureExecutor(1139): Stored pid=89, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithResetTtl 2024-12-03T23:28:46,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=89 2024-12-03T23:28:46,835 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733268526835"}]},"ts":"1733268526835"} 2024-12-03T23:28:46,837 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=DISABLING in hbase:meta 2024-12-03T23:28:46,837 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithResetTtl to state=DISABLING 2024-12-03T23:28:46,838 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=90, ppid=89, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithResetTtl}] 2024-12-03T23:28:46,840 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=800e371e963fd783c084d4294a35b750, UNASSIGN}, {pid=92, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=ed0c58842ad93c456247a9c71ef7bba5, UNASSIGN}] 2024-12-03T23:28:46,841 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=800e371e963fd783c084d4294a35b750, UNASSIGN 2024-12-03T23:28:46,841 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=92, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, 
hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=ed0c58842ad93c456247a9c71ef7bba5, UNASSIGN 2024-12-03T23:28:46,842 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=91 updating hbase:meta row=800e371e963fd783c084d4294a35b750, regionState=CLOSING, regionLocation=ed8014902436,39085,1733268370401 2024-12-03T23:28:46,842 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=92 updating hbase:meta row=ed0c58842ad93c456247a9c71ef7bba5, regionState=CLOSING, regionLocation=ed8014902436,37351,1733268370600 2024-12-03T23:28:46,844 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=800e371e963fd783c084d4294a35b750, UNASSIGN because future has completed 2024-12-03T23:28:46,844 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-03T23:28:46,844 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=93, ppid=91, state=RUNNABLE, hasLock=false; CloseRegionProcedure 800e371e963fd783c084d4294a35b750, server=ed8014902436,39085,1733268370401}] 2024-12-03T23:28:46,845 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=92, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=ed0c58842ad93c456247a9c71ef7bba5, UNASSIGN because future has completed 2024-12-03T23:28:46,845 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-03T23:28:46,845 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=94, ppid=92, state=RUNNABLE, hasLock=false; CloseRegionProcedure ed0c58842ad93c456247a9c71ef7bba5, server=ed8014902436,37351,1733268370600}] 2024-12-03T23:28:46,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=89 2024-12-03T23:28:46,952 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_3/usercache/jenkins/appcache/application_1733268377251_0003/container_1733268377251_0003_01_000002/launch_container.sh] 2024-12-03T23:28:46,953 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_3/usercache/jenkins/appcache/application_1733268377251_0003/container_1733268377251_0003_01_000002/container_tokens] 2024-12-03T23:28:46,953 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_3/usercache/jenkins/appcache/application_1733268377251_0003/container_1733268377251_0003_01_000002/sysfs] 
2024-12-03T23:28:46,997 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] handler.UnassignRegionHandler(122): Close 800e371e963fd783c084d4294a35b750 2024-12-03T23:28:46,997 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-03T23:28:46,997 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1722): Closing 800e371e963fd783c084d4294a35b750, disabling compactions & flushes 2024-12-03T23:28:46,997 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,,1733268503524.800e371e963fd783c084d4294a35b750. 2024-12-03T23:28:46,997 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,,1733268503524.800e371e963fd783c084d4294a35b750. 2024-12-03T23:28:46,997 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,,1733268503524.800e371e963fd783c084d4294a35b750. after waiting 0 ms 2024-12-03T23:28:46,997 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,,1733268503524.800e371e963fd783c084d4294a35b750. 2024-12-03T23:28:46,998 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(122): Close ed0c58842ad93c456247a9c71ef7bba5 2024-12-03T23:28:46,998 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-03T23:28:46,998 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1722): Closing ed0c58842ad93c456247a9c71ef7bba5, disabling compactions & flushes 2024-12-03T23:28:46,998 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,1,1733268503524.ed0c58842ad93c456247a9c71ef7bba5. 2024-12-03T23:28:46,998 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,1,1733268503524.ed0c58842ad93c456247a9c71ef7bba5. 2024-12-03T23:28:46,998 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,1,1733268503524.ed0c58842ad93c456247a9c71ef7bba5. after waiting 0 ms 2024-12-03T23:28:46,998 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,1,1733268503524.ed0c58842ad93c456247a9c71ef7bba5. 
2024-12-03T23:28:47,001 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithResetTtl/ed0c58842ad93c456247a9c71ef7bba5/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-03T23:28:47,001 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithResetTtl/800e371e963fd783c084d4294a35b750/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-03T23:28:47,002 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-03T23:28:47,002 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-03T23:28:47,002 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,1,1733268503524.ed0c58842ad93c456247a9c71ef7bba5. 2024-12-03T23:28:47,002 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,,1733268503524.800e371e963fd783c084d4294a35b750. 2024-12-03T23:28:47,002 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1676): Region close journal for ed0c58842ad93c456247a9c71ef7bba5: Waiting for close lock at 1733268526998Running coprocessor pre-close hooks at 1733268526998Disabling compacts and flushes for region at 1733268526998Disabling writes for close at 1733268526998Writing region close event to WAL at 1733268526999 (+1 ms)Running coprocessor post-close hooks at 1733268527002 (+3 ms)Closed at 1733268527002 2024-12-03T23:28:47,002 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1676): Region close journal for 800e371e963fd783c084d4294a35b750: Waiting for close lock at 1733268526997Running coprocessor pre-close hooks at 1733268526997Disabling compacts and flushes for region at 1733268526997Disabling writes for close at 1733268526997Writing region close event to WAL at 1733268526998 (+1 ms)Running coprocessor post-close hooks at 1733268527002 (+4 ms)Closed at 1733268527002 2024-12-03T23:28:47,003 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(157): Closed ed0c58842ad93c456247a9c71ef7bba5 2024-12-03T23:28:47,004 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=92 updating hbase:meta row=ed0c58842ad93c456247a9c71ef7bba5, regionState=CLOSED 2024-12-03T23:28:47,005 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] handler.UnassignRegionHandler(157): Closed 800e371e963fd783c084d4294a35b750 2024-12-03T23:28:47,005 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=91 updating hbase:meta row=800e371e963fd783c084d4294a35b750, regionState=CLOSED 2024-12-03T23:28:47,006 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake 
up procedure pid=94, ppid=92, state=RUNNABLE, hasLock=false; CloseRegionProcedure ed0c58842ad93c456247a9c71ef7bba5, server=ed8014902436,37351,1733268370600 because future has completed 2024-12-03T23:28:47,007 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=93, ppid=91, state=RUNNABLE, hasLock=false; CloseRegionProcedure 800e371e963fd783c084d4294a35b750, server=ed8014902436,39085,1733268370401 because future has completed 2024-12-03T23:28:47,009 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=93, resume processing ppid=91 2024-12-03T23:28:47,009 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=93, ppid=91, state=SUCCESS, hasLock=false; CloseRegionProcedure 800e371e963fd783c084d4294a35b750, server=ed8014902436,39085,1733268370401 in 164 msec 2024-12-03T23:28:47,010 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=94, resume processing ppid=92 2024-12-03T23:28:47,010 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=94, ppid=92, state=SUCCESS, hasLock=false; CloseRegionProcedure ed0c58842ad93c456247a9c71ef7bba5, server=ed8014902436,37351,1733268370600 in 163 msec 2024-12-03T23:28:47,010 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=91, ppid=90, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=800e371e963fd783c084d4294a35b750, UNASSIGN in 169 msec 2024-12-03T23:28:47,011 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=92, resume processing ppid=90 2024-12-03T23:28:47,011 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=92, ppid=90, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=ed0c58842ad93c456247a9c71ef7bba5, UNASSIGN in 170 msec 2024-12-03T23:28:47,013 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=90, resume processing ppid=89 2024-12-03T23:28:47,013 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=90, ppid=89, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithResetTtl in 174 msec 2024-12-03T23:28:47,014 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733268527014"}]},"ts":"1733268527014"} 2024-12-03T23:28:47,016 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=DISABLED in hbase:meta 2024-12-03T23:28:47,016 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithResetTtl to state=DISABLED 2024-12-03T23:28:47,018 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=89, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithResetTtl in 186 msec 2024-12-03T23:28:47,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=89 2024-12-03T23:28:47,149 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithResetTtl completed 2024-12-03T23:28:47,150 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete 
testtb-testExportWithResetTtl 2024-12-03T23:28:47,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure2.ProcedureExecutor(1139): Stored pid=95, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-03T23:28:47,152 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=95, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-03T23:28:47,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithResetTtl 2024-12-03T23:28:47,153 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=95, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-03T23:28:47,158 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37351 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithResetTtl 2024-12-03T23:28:47,158 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithResetTtl/800e371e963fd783c084d4294a35b750 2024-12-03T23:28:47,158 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithResetTtl/ed0c58842ad93c456247a9c71ef7bba5 2024-12-03T23:28:47,164 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithResetTtl/ed0c58842ad93c456247a9c71ef7bba5/cf, FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithResetTtl/ed0c58842ad93c456247a9c71ef7bba5/recovered.edits] 2024-12-03T23:28:47,164 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithResetTtl/800e371e963fd783c084d4294a35b750/cf, FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithResetTtl/800e371e963fd783c084d4294a35b750/recovered.edits] 2024-12-03T23:28:47,195 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithResetTtl/ed0c58842ad93c456247a9c71ef7bba5/cf/280882a0d58745f9bc871972c812f83b to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testtb-testExportWithResetTtl/ed0c58842ad93c456247a9c71ef7bba5/cf/280882a0d58745f9bc871972c812f83b 2024-12-03T23:28:47,196 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithResetTtl/800e371e963fd783c084d4294a35b750/cf/ebfb8c49d4d34194a5862f85b5445c74 to 
hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testtb-testExportWithResetTtl/800e371e963fd783c084d4294a35b750/cf/ebfb8c49d4d34194a5862f85b5445c74 2024-12-03T23:28:47,200 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithResetTtl/800e371e963fd783c084d4294a35b750/recovered.edits/9.seqid to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testtb-testExportWithResetTtl/800e371e963fd783c084d4294a35b750/recovered.edits/9.seqid 2024-12-03T23:28:47,200 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithResetTtl/ed0c58842ad93c456247a9c71ef7bba5/recovered.edits/9.seqid to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testtb-testExportWithResetTtl/ed0c58842ad93c456247a9c71ef7bba5/recovered.edits/9.seqid 2024-12-03T23:28:47,201 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithResetTtl/800e371e963fd783c084d4294a35b750 2024-12-03T23:28:47,201 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithResetTtl/ed0c58842ad93c456247a9c71ef7bba5 2024-12-03T23:28:47,201 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithResetTtl regions 2024-12-03T23:28:47,202 DEBUG [PEWorker-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e 2024-12-03T23:28:47,203 DEBUG [PEWorker-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf] 2024-12-03T23:28:47,207 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/c4ca4238a0b923820dcc509a6f75849b20241203ca69e17b0dc3423782ef73d08290774d_ed0c58842ad93c456247a9c71ef7bba5 to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/c4ca4238a0b923820dcc509a6f75849b20241203ca69e17b0dc3423782ef73d08290774d_ed0c58842ad93c456247a9c71ef7bba5 2024-12-03T23:28:47,209 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/d41d8cd98f00b204e9800998ecf8427e202412038af39e906ceb4f03bbf482bde966d73a_800e371e963fd783c084d4294a35b750 to 
hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/d41d8cd98f00b204e9800998ecf8427e202412038af39e906ceb4f03bbf482bde966d73a_800e371e963fd783c084d4294a35b750 2024-12-03T23:28:47,210 DEBUG [PEWorker-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e 2024-12-03T23:28:47,213 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=95, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-03T23:28:47,216 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-03T23:28:47,216 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-03T23:28:47,216 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-03T23:28:47,216 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-03T23:28:47,216 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-12-03T23:28:47,217 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-12-03T23:28:47,217 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-12-03T23:28:47,217 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-12-03T23:28:47,217 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithResetTtl from hbase:meta 2024-12-03T23:28:47,220 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithResetTtl' descriptor. 
2024-12-03T23:28:47,224 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-03T23:28:47,224 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-03T23:28:47,224 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-03T23:28:47,224 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:28:47,224 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:28:47,224 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:28:47,225 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-03T23:28:47,225 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:28:47,225 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=95, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-03T23:28:47,225 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithResetTtl' from region states. 
2024-12-03T23:28:47,225 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl,,1733268503524.800e371e963fd783c084d4294a35b750.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733268527225"}]},"ts":"9223372036854775807"} 2024-12-03T23:28:47,226 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl,1,1733268503524.ed0c58842ad93c456247a9c71ef7bba5.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733268527225"}]},"ts":"9223372036854775807"} 2024-12-03T23:28:47,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=95 2024-12-03T23:28:47,229 INFO [PEWorker-4 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-03T23:28:47,229 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 800e371e963fd783c084d4294a35b750, NAME => 'testtb-testExportWithResetTtl,,1733268503524.800e371e963fd783c084d4294a35b750.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => ed0c58842ad93c456247a9c71ef7bba5, NAME => 'testtb-testExportWithResetTtl,1,1733268503524.ed0c58842ad93c456247a9c71ef7bba5.', STARTKEY => '1', ENDKEY => ''}] 2024-12-03T23:28:47,229 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithResetTtl' as deleted. 2024-12-03T23:28:47,229 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733268527229"}]},"ts":"9223372036854775807"} 2024-12-03T23:28:47,232 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithResetTtl state from META 2024-12-03T23:28:47,233 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=95, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-03T23:28:47,234 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=95, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithResetTtl in 83 msec 2024-12-03T23:28:47,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=95 2024-12-03T23:28:47,330 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithResetTtl 2024-12-03T23:28:47,330 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithResetTtl completed 2024-12-03T23:28:47,338 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithResetTtl" type: DISABLED 2024-12-03T23:28:47,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithResetTtl 2024-12-03T23:28:47,341 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb-testExportWithResetTtl" type: DISABLED 2024-12-03T23:28:47,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb-testExportWithResetTtl 
2024-12-03T23:28:47,344 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithResetTtl" type: DISABLED 2024-12-03T23:28:47,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithResetTtl 2024-12-03T23:28:47,367 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportWithResetTtl Thread=799 (was 797) Potentially hanging thread: IPC Client (790252428) connection to localhost/127.0.0.1:39379 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HFileArchiver-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-919726820_22 at /127.0.0.1:42400 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) 
java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-919726820_22 at /127.0.0.1:48958 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39379 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) 
app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 134860) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1422444861_1 at /127.0.0.1:33454 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-3158 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: ApplicationMasterLauncher #4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (790252428) connection to localhost/127.0.0.1:34591 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HFileArchiver-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging 
thread: HFileArchiver-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-919726820_22 at /127.0.0.1:33472 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=814 (was 819), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=692 (was 529) - SystemLoadAverage LEAK? 
-, ProcessCount=19 (was 22), AvailableMemoryMB=1882 (was 1945) 2024-12-03T23:28:47,368 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=799 is superior to 500 2024-12-03T23:28:47,383 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportFileSystemState Thread=799, OpenFileDescriptor=814, MaxFileDescriptor=1048576, SystemLoadAverage=692, ProcessCount=19, AvailableMemoryMB=1882 2024-12-03T23:28:47,383 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=799 is superior to 500 2024-12-03T23:28:47,384 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-03T23:28:47,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure2.ProcedureExecutor(1139): Stored pid=96, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemState 2024-12-03T23:28:47,386 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_PRE_OPERATION 2024-12-03T23:28:47,386 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemState" procId is: 96 2024-12-03T23:28:47,387 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-03T23:28:47,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-12-03T23:28:47,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742037_1213 (size=443) 2024-12-03T23:28:47,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742037_1213 (size=443) 2024-12-03T23:28:47,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742037_1213 (size=443) 2024-12-03T23:28:47,396 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => efbe1e8e42a250871665f21f12ab1262, NAME => 'testtb-testExportFileSystemState,,1733268527384.efbe1e8e42a250871665f21f12ab1262.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 
'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b 2024-12-03T23:28:47,396 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 6d9af970788ccb97e8ac8580406838a5, NAME => 'testtb-testExportFileSystemState,1,1733268527384.6d9af970788ccb97e8ac8580406838a5.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b 2024-12-03T23:28:47,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742038_1214 (size=68) 2024-12-03T23:28:47,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742039_1215 (size=68) 2024-12-03T23:28:47,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742038_1214 (size=68) 2024-12-03T23:28:47,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742038_1214 (size=68) 2024-12-03T23:28:47,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742039_1215 (size=68) 2024-12-03T23:28:47,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742039_1215 (size=68) 2024-12-03T23:28:47,413 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,,1733268527384.efbe1e8e42a250871665f21f12ab1262.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T23:28:47,413 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1722): Closing efbe1e8e42a250871665f21f12ab1262, disabling compactions & flushes 2024-12-03T23:28:47,413 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,,1733268527384.efbe1e8e42a250871665f21f12ab1262. 2024-12-03T23:28:47,413 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,,1733268527384.efbe1e8e42a250871665f21f12ab1262. 2024-12-03T23:28:47,413 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,,1733268527384.efbe1e8e42a250871665f21f12ab1262. 
after waiting 0 ms 2024-12-03T23:28:47,413 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,,1733268527384.efbe1e8e42a250871665f21f12ab1262. 2024-12-03T23:28:47,413 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,,1733268527384.efbe1e8e42a250871665f21f12ab1262. 2024-12-03T23:28:47,414 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1676): Region close journal for efbe1e8e42a250871665f21f12ab1262: Waiting for close lock at 1733268527413Disabling compacts and flushes for region at 1733268527413Disabling writes for close at 1733268527413Writing region close event to WAL at 1733268527413Closed at 1733268527413 2024-12-03T23:28:47,415 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,1,1733268527384.6d9af970788ccb97e8ac8580406838a5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T23:28:47,415 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1722): Closing 6d9af970788ccb97e8ac8580406838a5, disabling compactions & flushes 2024-12-03T23:28:47,415 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,1,1733268527384.6d9af970788ccb97e8ac8580406838a5. 2024-12-03T23:28:47,415 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,1,1733268527384.6d9af970788ccb97e8ac8580406838a5. 2024-12-03T23:28:47,415 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,1,1733268527384.6d9af970788ccb97e8ac8580406838a5. after waiting 0 ms 2024-12-03T23:28:47,415 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,1,1733268527384.6d9af970788ccb97e8ac8580406838a5. 2024-12-03T23:28:47,415 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,1,1733268527384.6d9af970788ccb97e8ac8580406838a5. 
2024-12-03T23:28:47,415 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1676): Region close journal for 6d9af970788ccb97e8ac8580406838a5: Waiting for close lock at 1733268527415Disabling compacts and flushes for region at 1733268527415Disabling writes for close at 1733268527415Writing region close event to WAL at 1733268527415Closed at 1733268527415 2024-12-03T23:28:47,416 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_ADD_TO_META 2024-12-03T23:28:47,416 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemState,,1733268527384.efbe1e8e42a250871665f21f12ab1262.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1733268527416"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733268527416"}]},"ts":"1733268527416"} 2024-12-03T23:28:47,416 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemState,1,1733268527384.6d9af970788ccb97e8ac8580406838a5.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1733268527416"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733268527416"}]},"ts":"1733268527416"} 2024-12-03T23:28:47,418 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-03T23:28:47,419 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-03T23:28:47,419 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733268527419"}]},"ts":"1733268527419"} 2024-12-03T23:28:47,421 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=ENABLING in hbase:meta 2024-12-03T23:28:47,421 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {ed8014902436=0} racks are {/default-rack=0} 2024-12-03T23:28:47,422 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-03T23:28:47,422 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-03T23:28:47,422 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-03T23:28:47,422 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-03T23:28:47,422 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-03T23:28:47,422 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-03T23:28:47,422 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-03T23:28:47,422 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-03T23:28:47,422 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-03T23:28:47,422 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-03T23:28:47,422 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, 
hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=efbe1e8e42a250871665f21f12ab1262, ASSIGN}, {pid=98, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=6d9af970788ccb97e8ac8580406838a5, ASSIGN}] 2024-12-03T23:28:47,423 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=efbe1e8e42a250871665f21f12ab1262, ASSIGN 2024-12-03T23:28:47,424 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=98, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=6d9af970788ccb97e8ac8580406838a5, ASSIGN 2024-12-03T23:28:47,424 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=98, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=6d9af970788ccb97e8ac8580406838a5, ASSIGN; state=OFFLINE, location=ed8014902436,39085,1733268370401; forceNewPlan=false, retain=false 2024-12-03T23:28:47,424 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=efbe1e8e42a250871665f21f12ab1262, ASSIGN; state=OFFLINE, location=ed8014902436,41121,1733268370525; forceNewPlan=false, retain=false 2024-12-03T23:28:47,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-12-03T23:28:47,575 INFO [ed8014902436:43753 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-03T23:28:47,575 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=97 updating hbase:meta row=efbe1e8e42a250871665f21f12ab1262, regionState=OPENING, regionLocation=ed8014902436,41121,1733268370525 2024-12-03T23:28:47,576 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=98 updating hbase:meta row=6d9af970788ccb97e8ac8580406838a5, regionState=OPENING, regionLocation=ed8014902436,39085,1733268370401 2024-12-03T23:28:47,580 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=efbe1e8e42a250871665f21f12ab1262, ASSIGN because future has completed 2024-12-03T23:28:47,580 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=99, ppid=97, state=RUNNABLE, hasLock=false; OpenRegionProcedure efbe1e8e42a250871665f21f12ab1262, server=ed8014902436,41121,1733268370525}] 2024-12-03T23:28:47,582 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=98, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=6d9af970788ccb97e8ac8580406838a5, ASSIGN because future has completed 2024-12-03T23:28:47,583 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=100, ppid=98, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6d9af970788ccb97e8ac8580406838a5, server=ed8014902436,39085,1733268370401}] 2024-12-03T23:28:47,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-12-03T23:28:47,738 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemState,,1733268527384.efbe1e8e42a250871665f21f12ab1262. 2024-12-03T23:28:47,738 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(7752): Opening region: {ENCODED => efbe1e8e42a250871665f21f12ab1262, NAME => 'testtb-testExportFileSystemState,,1733268527384.efbe1e8e42a250871665f21f12ab1262.', STARTKEY => '', ENDKEY => '1'} 2024-12-03T23:28:47,738 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemState,,1733268527384.efbe1e8e42a250871665f21f12ab1262. service=AccessControlService 2024-12-03T23:28:47,738 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemState,1,1733268527384.6d9af970788ccb97e8ac8580406838a5. 2024-12-03T23:28:47,738 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-03T23:28:47,738 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7752): Opening region: {ENCODED => 6d9af970788ccb97e8ac8580406838a5, NAME => 'testtb-testExportFileSystemState,1,1733268527384.6d9af970788ccb97e8ac8580406838a5.', STARTKEY => '1', ENDKEY => ''} 2024-12-03T23:28:47,738 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemState efbe1e8e42a250871665f21f12ab1262 2024-12-03T23:28:47,739 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,,1733268527384.efbe1e8e42a250871665f21f12ab1262.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T23:28:47,739 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(7794): checking encryption for efbe1e8e42a250871665f21f12ab1262 2024-12-03T23:28:47,739 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(7797): checking classloading for efbe1e8e42a250871665f21f12ab1262 2024-12-03T23:28:47,739 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemState,1,1733268527384.6d9af970788ccb97e8ac8580406838a5. service=AccessControlService 2024-12-03T23:28:47,739 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-03T23:28:47,739 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemState 6d9af970788ccb97e8ac8580406838a5 2024-12-03T23:28:47,739 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,1,1733268527384.6d9af970788ccb97e8ac8580406838a5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T23:28:47,739 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7794): checking encryption for 6d9af970788ccb97e8ac8580406838a5 2024-12-03T23:28:47,739 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7797): checking classloading for 6d9af970788ccb97e8ac8580406838a5 2024-12-03T23:28:47,740 INFO [StoreOpener-efbe1e8e42a250871665f21f12ab1262-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region efbe1e8e42a250871665f21f12ab1262 2024-12-03T23:28:47,740 INFO [StoreOpener-6d9af970788ccb97e8ac8580406838a5-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 6d9af970788ccb97e8ac8580406838a5 2024-12-03T23:28:47,741 INFO [StoreOpener-6d9af970788ccb97e8ac8580406838a5-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6d9af970788ccb97e8ac8580406838a5 columnFamilyName cf 2024-12-03T23:28:47,741 INFO [StoreOpener-efbe1e8e42a250871665f21f12ab1262-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region efbe1e8e42a250871665f21f12ab1262 columnFamilyName cf 2024-12-03T23:28:47,742 DEBUG [StoreOpener-6d9af970788ccb97e8ac8580406838a5-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:28:47,742 DEBUG [StoreOpener-efbe1e8e42a250871665f21f12ab1262-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:28:47,743 INFO [StoreOpener-efbe1e8e42a250871665f21f12ab1262-1 {}] regionserver.HStore(327): Store=efbe1e8e42a250871665f21f12ab1262/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T23:28:47,743 INFO [StoreOpener-6d9af970788ccb97e8ac8580406838a5-1 {}] regionserver.HStore(327): Store=6d9af970788ccb97e8ac8580406838a5/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T23:28:47,743 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1038): replaying wal for efbe1e8e42a250871665f21f12ab1262 2024-12-03T23:28:47,743 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1038): replaying wal for 6d9af970788ccb97e8ac8580406838a5 2024-12-03T23:28:47,744 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemState/6d9af970788ccb97e8ac8580406838a5 2024-12-03T23:28:47,744 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemState/efbe1e8e42a250871665f21f12ab1262 2024-12-03T23:28:47,744 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemState/6d9af970788ccb97e8ac8580406838a5 2024-12-03T23:28:47,744 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemState/efbe1e8e42a250871665f21f12ab1262 2024-12-03T23:28:47,745 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1048): stopping wal replay for efbe1e8e42a250871665f21f12ab1262 2024-12-03T23:28:47,745 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1048): stopping wal replay for 6d9af970788ccb97e8ac8580406838a5 2024-12-03T23:28:47,745 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1060): Cleaning up temporary data for efbe1e8e42a250871665f21f12ab1262 2024-12-03T23:28:47,745 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1060): Cleaning up temporary data for 6d9af970788ccb97e8ac8580406838a5 2024-12-03T23:28:47,746 DEBUG 
[RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1093): writing seq id for efbe1e8e42a250871665f21f12ab1262 2024-12-03T23:28:47,746 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1093): writing seq id for 6d9af970788ccb97e8ac8580406838a5 2024-12-03T23:28:47,748 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemState/6d9af970788ccb97e8ac8580406838a5/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T23:28:47,748 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemState/efbe1e8e42a250871665f21f12ab1262/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T23:28:47,749 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1114): Opened efbe1e8e42a250871665f21f12ab1262; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75441574, jitterRate=0.1241670548915863}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-03T23:28:47,749 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1114): Opened 6d9af970788ccb97e8ac8580406838a5; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72718608, jitterRate=0.08359169960021973}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-03T23:28:47,749 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1122): Running coprocessor post-open hooks for efbe1e8e42a250871665f21f12ab1262 2024-12-03T23:28:47,749 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 6d9af970788ccb97e8ac8580406838a5 2024-12-03T23:28:47,749 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1006): Region open journal for efbe1e8e42a250871665f21f12ab1262: Running coprocessor pre-open hook at 1733268527739Writing region info on filesystem at 1733268527739Initializing all the Stores at 1733268527740 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733268527740Cleaning up temporary data from old regions at 1733268527745 (+5 ms)Running coprocessor post-open hooks at 1733268527749 (+4 ms)Region opened successfully at 1733268527749 2024-12-03T23:28:47,749 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1006): Region open journal for 6d9af970788ccb97e8ac8580406838a5: Running coprocessor pre-open hook at 
1733268527739Writing region info on filesystem at 1733268527739Initializing all the Stores at 1733268527740 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733268527740Cleaning up temporary data from old regions at 1733268527745 (+5 ms)Running coprocessor post-open hooks at 1733268527749 (+4 ms)Region opened successfully at 1733268527749 2024-12-03T23:28:47,750 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemState,,1733268527384.efbe1e8e42a250871665f21f12ab1262., pid=99, masterSystemTime=1733268527735 2024-12-03T23:28:47,750 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemState,1,1733268527384.6d9af970788ccb97e8ac8580406838a5., pid=100, masterSystemTime=1733268527736 2024-12-03T23:28:47,752 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemState,1,1733268527384.6d9af970788ccb97e8ac8580406838a5. 2024-12-03T23:28:47,752 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemState,1,1733268527384.6d9af970788ccb97e8ac8580406838a5. 2024-12-03T23:28:47,752 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=98 updating hbase:meta row=6d9af970788ccb97e8ac8580406838a5, regionState=OPEN, openSeqNum=2, regionLocation=ed8014902436,39085,1733268370401 2024-12-03T23:28:47,752 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemState,,1733268527384.efbe1e8e42a250871665f21f12ab1262. 2024-12-03T23:28:47,752 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemState,,1733268527384.efbe1e8e42a250871665f21f12ab1262. 
2024-12-03T23:28:47,753 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=97 updating hbase:meta row=efbe1e8e42a250871665f21f12ab1262, regionState=OPEN, openSeqNum=2, regionLocation=ed8014902436,41121,1733268370525 2024-12-03T23:28:47,754 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=100, ppid=98, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6d9af970788ccb97e8ac8580406838a5, server=ed8014902436,39085,1733268370401 because future has completed 2024-12-03T23:28:47,755 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=99, ppid=97, state=RUNNABLE, hasLock=false; OpenRegionProcedure efbe1e8e42a250871665f21f12ab1262, server=ed8014902436,41121,1733268370525 because future has completed 2024-12-03T23:28:47,757 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=100, resume processing ppid=98 2024-12-03T23:28:47,757 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=100, ppid=98, state=SUCCESS, hasLock=false; OpenRegionProcedure 6d9af970788ccb97e8ac8580406838a5, server=ed8014902436,39085,1733268370401 in 172 msec 2024-12-03T23:28:47,758 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=99, resume processing ppid=97 2024-12-03T23:28:47,758 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=99, ppid=97, state=SUCCESS, hasLock=false; OpenRegionProcedure efbe1e8e42a250871665f21f12ab1262, server=ed8014902436,41121,1733268370525 in 176 msec 2024-12-03T23:28:47,760 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=98, ppid=96, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=6d9af970788ccb97e8ac8580406838a5, ASSIGN in 335 msec 2024-12-03T23:28:47,761 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=97, resume processing ppid=96 2024-12-03T23:28:47,761 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=97, ppid=96, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=efbe1e8e42a250871665f21f12ab1262, ASSIGN in 337 msec 2024-12-03T23:28:47,762 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-03T23:28:47,763 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733268527762"}]},"ts":"1733268527762"} 2024-12-03T23:28:47,765 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=ENABLED in hbase:meta 2024-12-03T23:28:47,766 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_POST_OPERATION 2024-12-03T23:28:47,766 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemState jenkins: RWXCA 2024-12-03T23:28:47,769 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37351 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 
2024-12-03T23:28:47,782 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:28:47,782 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:28:47,782 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:28:47,782 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:28:47,791 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-03T23:28:47,791 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-03T23:28:47,791 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-03T23:28:47,792 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-03T23:28:47,793 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=96, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemState in 406 msec 2024-12-03T23:28:48,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-12-03T23:28:48,020 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemState completed 2024-12-03T23:28:48,020 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-03T23:28:48,027 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemState 2024-12-03T23:28:48,027 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemState,,1733268527384.efbe1e8e42a250871665f21f12ab1262. 
2024-12-03T23:28:48,027 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-03T23:28:48,030 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-03T23:28:48,034 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-03T23:28:48,039 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-03T23:28:48,041 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-03T23:28:48,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733268528041 (current time:1733268528041). 2024-12-03T23:28:48,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-03T23:28:48,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemState VERSION not specified, setting to 2 2024-12-03T23:28:48,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-03T23:28:48,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@39bf3ee8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:28:48,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] client.ClusterIdFetcher(90): Going to request ed8014902436,43753,-1 for getting cluster id 2024-12-03T23:28:48,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T23:28:48,043 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '365bf15c-9cbe-4838-a767-ac7274dcdbe4' 2024-12-03T23:28:48,043 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T23:28:48,043 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "365bf15c-9cbe-4838-a767-ac7274dcdbe4" 2024-12-03T23:28:48,043 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@222120a2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, 
fallbackAllowed=true, bind address=null 2024-12-03T23:28:48,043 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [ed8014902436,43753,-1] 2024-12-03T23:28:48,044 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T23:28:48,044 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:28:48,045 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32988, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T23:28:48,045 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c52596f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:28:48,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T23:28:48,047 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ed8014902436,37351,1733268370600, seqNum=-1] 2024-12-03T23:28:48,047 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T23:28:48,048 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45330, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T23:28:48,049 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753. 
2024-12-03T23:28:48,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-03T23:28:48,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:28:48,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:28:48,050 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-03T23:28:48,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@77761c37, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:28:48,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] client.ClusterIdFetcher(90): Going to request ed8014902436,43753,-1 for getting cluster id 2024-12-03T23:28:48,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T23:28:48,051 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '365bf15c-9cbe-4838-a767-ac7274dcdbe4' 2024-12-03T23:28:48,052 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T23:28:48,052 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "365bf15c-9cbe-4838-a767-ac7274dcdbe4" 2024-12-03T23:28:48,052 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@15a7c826, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:28:48,052 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [ed8014902436,43753,-1] 2024-12-03T23:28:48,052 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T23:28:48,052 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:28:48,053 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32994, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T23:28:48,054 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1bf649b9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:28:48,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T23:28:48,056 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ed8014902436,37351,1733268370600, seqNum=-1] 2024-12-03T23:28:48,056 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T23:28:48,057 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45334, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T23:28:48,059 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1733268373530.94ece278d222af9bae70064026dc24ae., hostname=ed8014902436,37351,1733268370600, seqNum=2] 2024-12-03T23:28:48,061 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753. 
2024-12-03T23:28:48,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor256.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-03T23:28:48,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:28:48,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:28:48,061 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-03T23:28:48,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-12-03T23:28:48,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-12-03T23:28:48,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] procedure2.ProcedureExecutor(1139): Stored pid=101, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-03T23:28:48,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 101 2024-12-03T23:28:48,064 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-03T23:28:48,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=101 2024-12-03T23:28:48,065 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-03T23:28:48,067 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-03T23:28:48,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742040_1216 (size=170) 2024-12-03T23:28:48,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742040_1216 (size=170) 2024-12-03T23:28:48,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742040_1216 (size=170) 2024-12-03T23:28:48,092 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-03T23:28:48,093 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=102, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure efbe1e8e42a250871665f21f12ab1262}, {pid=103, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6d9af970788ccb97e8ac8580406838a5}] 2024-12-03T23:28:48,094 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=103, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6d9af970788ccb97e8ac8580406838a5 2024-12-03T23:28:48,094 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=102, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure efbe1e8e42a250871665f21f12ab1262 2024-12-03T23:28:48,170 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=101 2024-12-03T23:28:48,247 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39085 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=103 2024-12-03T23:28:48,247 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,1,1733268527384.6d9af970788ccb97e8ac8580406838a5. 2024-12-03T23:28:48,247 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41121 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=102 2024-12-03T23:28:48,248 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,,1733268527384.efbe1e8e42a250871665f21f12ab1262. 2024-12-03T23:28:48,248 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.HRegion(2603): Flush status journal for 6d9af970788ccb97e8ac8580406838a5: 2024-12-03T23:28:48,248 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,1,1733268527384.6d9af970788ccb97e8ac8580406838a5. for emptySnaptb0-testExportFileSystemState completed. 2024-12-03T23:28:48,248 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.HRegion(2603): Flush status journal for efbe1e8e42a250871665f21f12ab1262: 2024-12-03T23:28:48,248 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,,1733268527384.efbe1e8e42a250871665f21f12ab1262. for emptySnaptb0-testExportFileSystemState completed. 2024-12-03T23:28:48,248 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,1,1733268527384.6d9af970788ccb97e8ac8580406838a5.' region-info for snapshot=emptySnaptb0-testExportFileSystemState 2024-12-03T23:28:48,249 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-03T23:28:48,249 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,,1733268527384.efbe1e8e42a250871665f21f12ab1262.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemState 2024-12-03T23:28:48,249 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-03T23:28:48,249 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-03T23:28:48,249 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-03T23:28:48,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742042_1218 (size=71) 2024-12-03T23:28:48,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742042_1218 (size=71) 2024-12-03T23:28:48,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742042_1218 (size=71) 2024-12-03T23:28:48,258 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,1,1733268527384.6d9af970788ccb97e8ac8580406838a5. 2024-12-03T23:28:48,258 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=103 2024-12-03T23:28:48,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.HMaster(4169): Remote procedure done, pid=103 2024-12-03T23:28:48,259 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemState on region 6d9af970788ccb97e8ac8580406838a5 2024-12-03T23:28:48,259 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=103, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6d9af970788ccb97e8ac8580406838a5 2024-12-03T23:28:48,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742041_1217 (size=71) 2024-12-03T23:28:48,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742041_1217 (size=71) 2024-12-03T23:28:48,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742041_1217 (size=71) 2024-12-03T23:28:48,260 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,,1733268527384.efbe1e8e42a250871665f21f12ab1262. 
2024-12-03T23:28:48,260 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=102 2024-12-03T23:28:48,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.HMaster(4169): Remote procedure done, pid=102 2024-12-03T23:28:48,261 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemState on region efbe1e8e42a250871665f21f12ab1262 2024-12-03T23:28:48,261 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=102, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure efbe1e8e42a250871665f21f12ab1262 2024-12-03T23:28:48,262 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=103, ppid=101, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 6d9af970788ccb97e8ac8580406838a5 in 168 msec 2024-12-03T23:28:48,263 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=102, resume processing ppid=101 2024-12-03T23:28:48,263 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=102, ppid=101, state=SUCCESS, hasLock=false; SnapshotRegionProcedure efbe1e8e42a250871665f21f12ab1262 in 169 msec 2024-12-03T23:28:48,263 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-03T23:28:48,264 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-03T23:28:48,265 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-03T23:28:48,265 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-03T23:28:48,265 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:28:48,266 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-03T23:28:48,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742043_1219 (size=63) 2024-12-03T23:28:48,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742043_1219 (size=63) 2024-12-03T23:28:48,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742043_1219 (size=63) 2024-12-03T23:28:48,275 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-03T23:28:48,275 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemState 2024-12-03T23:28:48,276 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemState 2024-12-03T23:28:48,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742044_1220 (size=653) 2024-12-03T23:28:48,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742044_1220 (size=653) 2024-12-03T23:28:48,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742044_1220 (size=653) 2024-12-03T23:28:48,287 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-03T23:28:48,291 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-03T23:28:48,292 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemState to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/emptySnaptb0-testExportFileSystemState 2024-12-03T23:28:48,293 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=101, 
state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-03T23:28:48,293 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 101 2024-12-03T23:28:48,294 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=101, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } in 231 msec 2024-12-03T23:28:48,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=101 2024-12-03T23:28:48,380 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemState completed 2024-12-03T23:28:48,385 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41121 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemState,,1733268527384.efbe1e8e42a250871665f21f12ab1262. with WAL disabled. Data may be lost in the event of a crash. 2024-12-03T23:28:48,388 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39085 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemState,1,1733268527384.6d9af970788ccb97e8ac8580406838a5. with WAL disabled. Data may be lost in the event of a crash. 2024-12-03T23:28:48,389 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-03T23:28:48,392 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemState 2024-12-03T23:28:48,392 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemState,,1733268527384.efbe1e8e42a250871665f21f12ab1262. 
2024-12-03T23:28:48,392 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-03T23:28:48,394 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-03T23:28:48,399 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-03T23:28:48,406 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-03T23:28:48,409 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-03T23:28:48,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733268528409 (current time:1733268528409). 2024-12-03T23:28:48,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-03T23:28:48,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemState VERSION not specified, setting to 2 2024-12-03T23:28:48,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-03T23:28:48,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3a5bcb58, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:28:48,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] client.ClusterIdFetcher(90): Going to request ed8014902436,43753,-1 for getting cluster id 2024-12-03T23:28:48,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T23:28:48,411 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '365bf15c-9cbe-4838-a767-ac7274dcdbe4' 2024-12-03T23:28:48,411 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T23:28:48,411 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "365bf15c-9cbe-4838-a767-ac7274dcdbe4" 2024-12-03T23:28:48,411 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@44de8cc6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, 
bind address=null 2024-12-03T23:28:48,411 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [ed8014902436,43753,-1] 2024-12-03T23:28:48,412 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T23:28:48,412 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:28:48,412 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33008, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T23:28:48,413 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@39788b68, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:28:48,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T23:28:48,414 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ed8014902436,37351,1733268370600, seqNum=-1] 2024-12-03T23:28:48,414 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T23:28:48,415 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45336, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T23:28:48,417 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753. 
2024-12-03T23:28:48,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-03T23:28:48,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:28:48,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:28:48,417 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-03T23:28:48,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@b4e2a79, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:28:48,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] client.ClusterIdFetcher(90): Going to request ed8014902436,43753,-1 for getting cluster id 2024-12-03T23:28:48,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T23:28:48,419 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '365bf15c-9cbe-4838-a767-ac7274dcdbe4' 2024-12-03T23:28:48,419 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T23:28:48,419 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "365bf15c-9cbe-4838-a767-ac7274dcdbe4" 2024-12-03T23:28:48,419 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6be9c550, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:28:48,419 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [ed8014902436,43753,-1] 2024-12-03T23:28:48,419 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T23:28:48,420 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:28:48,420 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33026, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T23:28:48,421 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3ca2b6e4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:28:48,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T23:28:48,422 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ed8014902436,37351,1733268370600, seqNum=-1] 2024-12-03T23:28:48,422 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T23:28:48,423 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45346, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T23:28:48,425 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1733268373530.94ece278d222af9bae70064026dc24ae., hostname=ed8014902436,37351,1733268370600, seqNum=2] 2024-12-03T23:28:48,427 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753. 
2024-12-03T23:28:48,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor256.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-03T23:28:48,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:28:48,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:28:48,427 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-03T23:28:48,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-12-03T23:28:48,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-12-03T23:28:48,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] procedure2.ProcedureExecutor(1139): Stored pid=104, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-03T23:28:48,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 104 2024-12-03T23:28:48,430 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-03T23:28:48,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=104 2024-12-03T23:28:48,431 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-03T23:28:48,433 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-03T23:28:48,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742045_1221 (size=165) 2024-12-03T23:28:48,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742045_1221 (size=165) 2024-12-03T23:28:48,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742045_1221 (size=165) 2024-12-03T23:28:48,440 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-03T23:28:48,440 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=105, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure efbe1e8e42a250871665f21f12ab1262}, {pid=106, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6d9af970788ccb97e8ac8580406838a5}] 2024-12-03T23:28:48,441 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=106, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6d9af970788ccb97e8ac8580406838a5 2024-12-03T23:28:48,441 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=105, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure efbe1e8e42a250871665f21f12ab1262 2024-12-03T23:28:48,540 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=104 2024-12-03T23:28:48,593 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41121 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=105 2024-12-03T23:28:48,593 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39085 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=106 2024-12-03T23:28:48,594 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,1,1733268527384.6d9af970788ccb97e8ac8580406838a5. 2024-12-03T23:28:48,594 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,,1733268527384.efbe1e8e42a250871665f21f12ab1262. 2024-12-03T23:28:48,594 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegion(2902): Flushing efbe1e8e42a250871665f21f12ab1262 1/1 column families, dataSize=333 B heapSize=976 B 2024-12-03T23:28:48,594 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegion(2902): Flushing 6d9af970788ccb97e8ac8580406838a5 1/1 column families, dataSize=2.93 KB heapSize=6.58 KB 2024-12-03T23:28:48,622 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202412030c2475deb8aa42f9950f1b70b82966b2_6d9af970788ccb97e8ac8580406838a5 is 71, key is 1038c2604b8ecba423bc3f3783231bdb/cf:q/1733268528388/Put/seqid=0 2024-12-03T23:28:48,622 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120307a7d75576ab41ba807f8b0fb30f88b7_efbe1e8e42a250871665f21f12ab1262 is 71, key is 00b25627e7c4c086a0500d45429c5863/cf:q/1733268528385/Put/seqid=0 2024-12-03T23:28:48,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742046_1222 (size=8031) 2024-12-03T23:28:48,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742046_1222 (size=8031) 2024-12-03T23:28:48,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742046_1222 (size=8031) 2024-12-03T23:28:48,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742047_1223 (size=5241) 2024-12-03T23:28:48,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742047_1223 (size=5241) 2024-12-03T23:28:48,628 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 
{event_type=RS_SNAPSHOT_REGIONS, pid=106}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:28:48,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742047_1223 (size=5241) 2024-12-03T23:28:48,629 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:28:48,633 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120307a7d75576ab41ba807f8b0fb30f88b7_efbe1e8e42a250871665f21f12ab1262 to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/d41d8cd98f00b204e9800998ecf8427e2024120307a7d75576ab41ba807f8b0fb30f88b7_efbe1e8e42a250871665f21f12ab1262 2024-12-03T23:28:48,633 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202412030c2475deb8aa42f9950f1b70b82966b2_6d9af970788ccb97e8ac8580406838a5 to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/c4ca4238a0b923820dcc509a6f75849b202412030c2475deb8aa42f9950f1b70b82966b2_6d9af970788ccb97e8ac8580406838a5 2024-12-03T23:28:48,634 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemState/efbe1e8e42a250871665f21f12ab1262/.tmp/cf/4264a4d072f9489ab33c3ec0a2a8c56e, store: [table=testtb-testExportFileSystemState family=cf region=efbe1e8e42a250871665f21f12ab1262] 2024-12-03T23:28:48,634 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemState/6d9af970788ccb97e8ac8580406838a5/.tmp/cf/1cb86284c8a74059aaad5f443fd5924f, store: [table=testtb-testExportFileSystemState family=cf region=6d9af970788ccb97e8ac8580406838a5] 2024-12-03T23:28:48,634 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemState/6d9af970788ccb97e8ac8580406838a5/.tmp/cf/1cb86284c8a74059aaad5f443fd5924f is 209, key is 1f0b5361533267cea3f95eb7f04ba19e4/cf:q/1733268528388/Put/seqid=0 2024-12-03T23:28:48,634 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, 
pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemState/efbe1e8e42a250871665f21f12ab1262/.tmp/cf/4264a4d072f9489ab33c3ec0a2a8c56e is 209, key is 0a69dc335b75987435a230b58e4d61b3a/cf:q/1733268528385/Put/seqid=0 2024-12-03T23:28:48,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742048_1224 (size=14587) 2024-12-03T23:28:48,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742049_1225 (size=6326) 2024-12-03T23:28:48,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742048_1224 (size=14587) 2024-12-03T23:28:48,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742049_1225 (size=6326) 2024-12-03T23:28:48,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742048_1224 (size=14587) 2024-12-03T23:28:48,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742049_1225 (size=6326) 2024-12-03T23:28:48,640 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=333, hasBloomFilter=true, into tmp file hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemState/efbe1e8e42a250871665f21f12ab1262/.tmp/cf/4264a4d072f9489ab33c3ec0a2a8c56e 2024-12-03T23:28:48,640 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=2.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemState/6d9af970788ccb97e8ac8580406838a5/.tmp/cf/1cb86284c8a74059aaad5f443fd5924f 2024-12-03T23:28:48,645 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemState/efbe1e8e42a250871665f21f12ab1262/.tmp/cf/4264a4d072f9489ab33c3ec0a2a8c56e as hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemState/efbe1e8e42a250871665f21f12ab1262/cf/4264a4d072f9489ab33c3ec0a2a8c56e 2024-12-03T23:28:48,645 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemState/6d9af970788ccb97e8ac8580406838a5/.tmp/cf/1cb86284c8a74059aaad5f443fd5924f as hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemState/6d9af970788ccb97e8ac8580406838a5/cf/1cb86284c8a74059aaad5f443fd5924f 2024-12-03T23:28:48,650 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 
{event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemState/6d9af970788ccb97e8ac8580406838a5/cf/1cb86284c8a74059aaad5f443fd5924f, entries=45, sequenceid=6, filesize=14.2 K 2024-12-03T23:28:48,650 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemState/efbe1e8e42a250871665f21f12ab1262/cf/4264a4d072f9489ab33c3ec0a2a8c56e, entries=5, sequenceid=6, filesize=6.2 K 2024-12-03T23:28:48,651 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegion(3140): Finished flush of dataSize ~2.93 KB/3003, heapSize ~6.56 KB/6720, currentSize=0 B/0 for 6d9af970788ccb97e8ac8580406838a5 in 56ms, sequenceid=6, compaction requested=false 2024-12-03T23:28:48,651 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegion(3140): Finished flush of dataSize ~333 B/333, heapSize ~960 B/960, currentSize=0 B/0 for efbe1e8e42a250871665f21f12ab1262 in 57ms, sequenceid=6, compaction requested=false 2024-12-03T23:28:48,651 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemState' 2024-12-03T23:28:48,651 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemState' 2024-12-03T23:28:48,651 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegion(2603): Flush status journal for 6d9af970788ccb97e8ac8580406838a5: 2024-12-03T23:28:48,651 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegion(2603): Flush status journal for efbe1e8e42a250871665f21f12ab1262: 2024-12-03T23:28:48,651 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,1,1733268527384.6d9af970788ccb97e8ac8580406838a5. for snaptb0-testExportFileSystemState completed. 2024-12-03T23:28:48,651 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,,1733268527384.efbe1e8e42a250871665f21f12ab1262. for snaptb0-testExportFileSystemState completed. 2024-12-03T23:28:48,651 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,1,1733268527384.6d9af970788ccb97e8ac8580406838a5.' region-info for snapshot=snaptb0-testExportFileSystemState 2024-12-03T23:28:48,651 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,,1733268527384.efbe1e8e42a250871665f21f12ab1262.' 
region-info for snapshot=snaptb0-testExportFileSystemState 2024-12-03T23:28:48,652 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-03T23:28:48,652 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-03T23:28:48,652 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemState/6d9af970788ccb97e8ac8580406838a5/cf/1cb86284c8a74059aaad5f443fd5924f] hfiles 2024-12-03T23:28:48,652 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemState/efbe1e8e42a250871665f21f12ab1262/cf/4264a4d072f9489ab33c3ec0a2a8c56e] hfiles 2024-12-03T23:28:48,652 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemState/efbe1e8e42a250871665f21f12ab1262/cf/4264a4d072f9489ab33c3ec0a2a8c56e for snapshot=snaptb0-testExportFileSystemState 2024-12-03T23:28:48,652 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemState/6d9af970788ccb97e8ac8580406838a5/cf/1cb86284c8a74059aaad5f443fd5924f for snapshot=snaptb0-testExportFileSystemState 2024-12-03T23:28:48,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742050_1226 (size=110) 2024-12-03T23:28:48,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742050_1226 (size=110) 2024-12-03T23:28:48,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742051_1227 (size=110) 2024-12-03T23:28:48,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742051_1227 (size=110) 2024-12-03T23:28:48,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742051_1227 (size=110) 2024-12-03T23:28:48,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742050_1226 (size=110) 2024-12-03T23:28:48,659 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,1,1733268527384.6d9af970788ccb97e8ac8580406838a5. 
2024-12-03T23:28:48,659 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,,1733268527384.efbe1e8e42a250871665f21f12ab1262. 2024-12-03T23:28:48,659 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=105 2024-12-03T23:28:48,659 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=106 2024-12-03T23:28:48,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.HMaster(4169): Remote procedure done, pid=105 2024-12-03T23:28:48,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.HMaster(4169): Remote procedure done, pid=106 2024-12-03T23:28:48,659 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemState on region efbe1e8e42a250871665f21f12ab1262 2024-12-03T23:28:48,660 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemState on region 6d9af970788ccb97e8ac8580406838a5 2024-12-03T23:28:48,660 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=106, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6d9af970788ccb97e8ac8580406838a5 2024-12-03T23:28:48,660 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=105, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure efbe1e8e42a250871665f21f12ab1262 2024-12-03T23:28:48,661 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=106, ppid=104, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 6d9af970788ccb97e8ac8580406838a5 in 220 msec 2024-12-03T23:28:48,662 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=105, resume processing ppid=104 2024-12-03T23:28:48,663 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-03T23:28:48,663 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=105, ppid=104, state=SUCCESS, hasLock=false; SnapshotRegionProcedure efbe1e8e42a250871665f21f12ab1262 in 220 msec 2024-12-03T23:28:48,663 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-03T23:28:48,664 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-03T23:28:48,664 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-03T23:28:48,664 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:28:48,665 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/c4ca4238a0b923820dcc509a6f75849b202412030c2475deb8aa42f9950f1b70b82966b2_6d9af970788ccb97e8ac8580406838a5, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/d41d8cd98f00b204e9800998ecf8427e2024120307a7d75576ab41ba807f8b0fb30f88b7_efbe1e8e42a250871665f21f12ab1262] hfiles 2024-12-03T23:28:48,665 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/c4ca4238a0b923820dcc509a6f75849b202412030c2475deb8aa42f9950f1b70b82966b2_6d9af970788ccb97e8ac8580406838a5 2024-12-03T23:28:48,665 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/d41d8cd98f00b204e9800998ecf8427e2024120307a7d75576ab41ba807f8b0fb30f88b7_efbe1e8e42a250871665f21f12ab1262 2024-12-03T23:28:48,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742052_1228 (size=294) 2024-12-03T23:28:48,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742052_1228 (size=294) 2024-12-03T23:28:48,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742052_1228 (size=294) 2024-12-03T23:28:48,672 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-03T23:28:48,672 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemState 2024-12-03T23:28:48,672 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-12-03T23:28:48,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742053_1229 (size=963) 2024-12-03T23:28:48,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742053_1229 (size=963) 2024-12-03T23:28:48,679 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742053_1229 (size=963) 2024-12-03T23:28:48,682 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-03T23:28:48,687 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-03T23:28:48,687 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-12-03T23:28:48,689 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-03T23:28:48,689 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 104 2024-12-03T23:28:48,690 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=104, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } in 261 msec 2024-12-03T23:28:48,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=104 2024-12-03T23:28:48,750 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemState completed 2024-12-03T23:28:48,750 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268528750 2024-12-03T23:28:48,751 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:37331, tgtDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268528750, rawTgtDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268528750, srcFsUri=hdfs://localhost:37331, srcDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b 2024-12-03T23:28:48,778 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:37331, inputRoot=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b 2024-12-03T23:28:48,778 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): 
outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-919726820_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268528750, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268528750/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-12-03T23:28:48,780 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-03T23:28:48,784 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/snaptb0-testExportFileSystemState to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268528750/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-12-03T23:28:48,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742054_1230 (size=165) 2024-12-03T23:28:48,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742054_1230 (size=165) 2024-12-03T23:28:48,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742054_1230 (size=165) 2024-12-03T23:28:48,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742055_1231 (size=963) 2024-12-03T23:28:48,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742055_1231 (size=963) 2024-12-03T23:28:48,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742055_1231 (size=963) 2024-12-03T23:28:48,805 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:28:48,805 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:28:48,806 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:28:49,277 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_3/usercache/jenkins/appcache/application_1733268377251_0003/container_1733268377251_0003_01_000003/launch_container.sh] 2024-12-03T23:28:49,277 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_3/usercache/jenkins/appcache/application_1733268377251_0003/container_1733268377251_0003_01_000003/container_tokens] 2024-12-03T23:28:49,277 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_3/usercache/jenkins/appcache/application_1733268377251_0003/container_1733268377251_0003_01_000003/sysfs] 2024-12-03T23:28:49,667 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/hadoop-15478288684438229948.jar 2024-12-03T23:28:49,668 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:28:49,668 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:28:49,723 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/hadoop-5133844360682027484.jar 2024-12-03T23:28:49,724 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:28:49,724 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:28:49,724 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:28:49,725 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:28:49,725 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:28:49,725 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:28:49,725 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-03T23:28:49,726 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-03T23:28:49,726 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-03T23:28:49,726 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-03T23:28:49,727 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-03T23:28:49,727 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-03T23:28:49,727 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-03T23:28:49,728 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-03T23:28:49,728 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-03T23:28:49,728 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-03T23:28:49,728 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-03T23:28:49,729 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-03T23:28:49,729 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-03T23:28:49,729 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-03T23:28:49,730 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-03T23:28:49,730 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-03T23:28:49,730 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-03T23:28:49,731 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-03T23:28:49,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742056_1232 (size=131440) 2024-12-03T23:28:49,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742056_1232 (size=131440) 2024-12-03T23:28:49,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742056_1232 (size=131440) 2024-12-03T23:28:49,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742057_1233 (size=4188619) 2024-12-03T23:28:49,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742057_1233 (size=4188619) 2024-12-03T23:28:49,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742057_1233 (size=4188619) 2024-12-03T23:28:49,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742058_1234 (size=1323991) 
2024-12-03T23:28:49,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742058_1234 (size=1323991) 2024-12-03T23:28:49,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742058_1234 (size=1323991) 2024-12-03T23:28:49,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742059_1235 (size=903924) 2024-12-03T23:28:49,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742059_1235 (size=903924) 2024-12-03T23:28:49,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742059_1235 (size=903924) 2024-12-03T23:28:49,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742060_1236 (size=8360360) 2024-12-03T23:28:49,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742060_1236 (size=8360360) 2024-12-03T23:28:49,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742060_1236 (size=8360360) 2024-12-03T23:28:49,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742061_1237 (size=443172) 2024-12-03T23:28:49,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742061_1237 (size=443172) 2024-12-03T23:28:49,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742061_1237 (size=443172) 2024-12-03T23:28:49,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742062_1238 (size=1877034) 2024-12-03T23:28:49,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742062_1238 (size=1877034) 2024-12-03T23:28:49,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742062_1238 (size=1877034) 2024-12-03T23:28:49,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742063_1239 (size=77835) 2024-12-03T23:28:49,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742063_1239 (size=77835) 2024-12-03T23:28:49,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742063_1239 (size=77835) 2024-12-03T23:28:49,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742064_1240 (size=30949) 2024-12-03T23:28:49,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742064_1240 (size=30949) 2024-12-03T23:28:49,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742064_1240 
(size=30949) 2024-12-03T23:28:49,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742065_1241 (size=1597213) 2024-12-03T23:28:49,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742065_1241 (size=1597213) 2024-12-03T23:28:49,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742065_1241 (size=1597213) 2024-12-03T23:28:49,917 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState 2024-12-03T23:28:49,918 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState Metrics about Tables on a single HBase RegionServer 2024-12-03T23:28:49,918 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl 2024-12-03T23:28:49,919 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl 2024-12-03T23:28:49,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742066_1242 (size=4695811) 2024-12-03T23:28:49,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742066_1242 (size=4695811) 2024-12-03T23:28:49,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742066_1242 (size=4695811) 2024-12-03T23:28:49,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742067_1243 (size=232957) 2024-12-03T23:28:49,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742067_1243 (size=232957) 2024-12-03T23:28:49,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742067_1243 (size=232957) 2024-12-03T23:28:49,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742068_1244 (size=127628) 2024-12-03T23:28:49,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742068_1244 (size=127628) 2024-12-03T23:28:49,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742068_1244 (size=127628) 2024-12-03T23:28:49,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742069_1245 (size=20406) 2024-12-03T23:28:49,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742069_1245 (size=20406) 2024-12-03T23:28:49,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to 
blk_1073742069_1245 (size=20406) 2024-12-03T23:28:49,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742070_1246 (size=5175431) 2024-12-03T23:28:49,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742070_1246 (size=5175431) 2024-12-03T23:28:49,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742070_1246 (size=5175431) 2024-12-03T23:28:49,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742071_1247 (size=217634) 2024-12-03T23:28:49,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742071_1247 (size=217634) 2024-12-03T23:28:49,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742071_1247 (size=217634) 2024-12-03T23:28:49,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742072_1248 (size=1832290) 2024-12-03T23:28:49,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742072_1248 (size=1832290) 2024-12-03T23:28:49,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742072_1248 (size=1832290) 2024-12-03T23:28:50,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742073_1249 (size=322274) 2024-12-03T23:28:50,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742073_1249 (size=322274) 2024-12-03T23:28:50,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742073_1249 (size=322274) 2024-12-03T23:28:50,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742074_1250 (size=503880) 2024-12-03T23:28:50,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742074_1250 (size=503880) 2024-12-03T23:28:50,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742074_1250 (size=503880) 2024-12-03T23:28:50,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742075_1251 (size=6424741) 2024-12-03T23:28:50,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742075_1251 (size=6424741) 2024-12-03T23:28:50,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742075_1251 (size=6424741) 2024-12-03T23:28:50,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742076_1252 (size=29229) 2024-12-03T23:28:50,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 
is added to blk_1073742076_1252 (size=29229) 2024-12-03T23:28:50,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742076_1252 (size=29229) 2024-12-03T23:28:50,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742077_1253 (size=24096) 2024-12-03T23:28:50,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742077_1253 (size=24096) 2024-12-03T23:28:50,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742077_1253 (size=24096) 2024-12-03T23:28:50,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742078_1254 (size=111872) 2024-12-03T23:28:50,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742078_1254 (size=111872) 2024-12-03T23:28:50,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742078_1254 (size=111872) 2024-12-03T23:28:50,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742079_1255 (size=45609) 2024-12-03T23:28:50,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742079_1255 (size=45609) 2024-12-03T23:28:50,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742079_1255 (size=45609) 2024-12-03T23:28:50,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742080_1256 (size=136454) 2024-12-03T23:28:50,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742080_1256 (size=136454) 2024-12-03T23:28:50,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742080_1256 (size=136454) 2024-12-03T23:28:50,508 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
2024-12-03T23:28:50,510 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportFileSystemState' hfile list 2024-12-03T23:28:50,512 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=14.2 K 2024-12-03T23:28:50,512 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=7.8 K 2024-12-03T23:28:50,513 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=6.2 K 2024-12-03T23:28:50,513 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=5.1 K 2024-12-03T23:28:50,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742081_1257 (size=1035) 2024-12-03T23:28:50,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742081_1257 (size=1035) 2024-12-03T23:28:50,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742081_1257 (size=1035) 2024-12-03T23:28:50,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742082_1258 (size=35) 2024-12-03T23:28:50,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742082_1258 (size=35) 2024-12-03T23:28:50,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742082_1258 (size=35) 2024-12-03T23:28:50,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742083_1259 (size=304004) 2024-12-03T23:28:50,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742083_1259 (size=304004) 2024-12-03T23:28:50,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742083_1259 (size=304004) 2024-12-03T23:28:50,754 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-03T23:28:50,754 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-03T23:28:50,757 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0003_000001 (auth:SIMPLE) from 127.0.0.1:34396 2024-12-03T23:28:50,779 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_3/usercache/jenkins/appcache/application_1733268377251_0003/container_1733268377251_0003_01_000001/launch_container.sh] 2024-12-03T23:28:50,779 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_3/usercache/jenkins/appcache/application_1733268377251_0003/container_1733268377251_0003_01_000001/container_tokens] 2024-12-03T23:28:50,779 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_3/usercache/jenkins/appcache/application_1733268377251_0003/container_1733268377251_0003_01_000001/sysfs] 2024-12-03T23:28:51,439 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0004_000001 (auth:SIMPLE) from 127.0.0.1:48542 2024-12-03T23:28:52,007 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-03T23:28:56,545 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0004_000001 (auth:SIMPLE) from 127.0.0.1:55090 2024-12-03T23:28:56,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742084_1260 (size=349702) 2024-12-03T23:28:56,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742084_1260 (size=349702) 2024-12-03T23:28:56,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742084_1260 (size=349702) 2024-12-03T23:28:58,854 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0004_000001 (auth:SIMPLE) from 127.0.0.1:55288 2024-12-03T23:28:58,855 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0004_000001 (auth:SIMPLE) from 127.0.0.1:52706 2024-12-03T23:28:59,683 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0004_000001 (auth:SIMPLE) from 127.0.0.1:52710 2024-12-03T23:28:59,691 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0004_000001 (auth:SIMPLE) from 127.0.0.1:55302 2024-12-03T23:29:01,758 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733268377251_0004_01_000006 while processing FINISH_CONTAINERS event 2024-12-03T23:29:04,641 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742085_1261 (size=8031) 2024-12-03T23:29:04,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742085_1261 (size=8031) 2024-12-03T23:29:04,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742085_1261 (size=8031) 2024-12-03T23:29:04,873 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_1/usercache/jenkins/appcache/application_1733268377251_0004/container_1733268377251_0004_01_000003/launch_container.sh] 2024-12-03T23:29:04,873 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_1/usercache/jenkins/appcache/application_1733268377251_0004/container_1733268377251_0004_01_000003/container_tokens] 2024-12-03T23:29:04,873 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_1/usercache/jenkins/appcache/application_1733268377251_0004/container_1733268377251_0004_01_000003/sysfs] 2024-12-03T23:29:06,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742087_1263 (size=5241) 2024-12-03T23:29:06,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742087_1263 (size=5241) 2024-12-03T23:29:06,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742087_1263 (size=5241) 2024-12-03T23:29:07,123 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_2/usercache/jenkins/appcache/application_1733268377251_0004/container_1733268377251_0004_01_000005/launch_container.sh] 2024-12-03T23:29:07,126 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_2/usercache/jenkins/appcache/application_1733268377251_0004/container_1733268377251_0004_01_000005/container_tokens] 2024-12-03T23:29:07,127 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_2/usercache/jenkins/appcache/application_1733268377251_0004/container_1733268377251_0004_01_000005/sysfs] 
2024-12-03T23:29:07,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742088_1264 (size=14587) 2024-12-03T23:29:07,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742088_1264 (size=14587) 2024-12-03T23:29:07,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742088_1264 (size=14587) 2024-12-03T23:29:07,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742089_1265 (size=6326) 2024-12-03T23:29:07,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742089_1265 (size=6326) 2024-12-03T23:29:07,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742089_1265 (size=6326) 2024-12-03T23:29:07,483 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_1/usercache/jenkins/appcache/application_1733268377251_0004/container_1733268377251_0004_01_000002/launch_container.sh] 2024-12-03T23:29:07,483 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_1/usercache/jenkins/appcache/application_1733268377251_0004/container_1733268377251_0004_01_000002/container_tokens] 2024-12-03T23:29:07,488 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_1/usercache/jenkins/appcache/application_1733268377251_0004/container_1733268377251_0004_01_000002/sysfs] 2024-12-03T23:29:07,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742086_1262 (size=31749) 2024-12-03T23:29:07,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742086_1262 (size=31749) 2024-12-03T23:29:07,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742086_1262 (size=31749) 2024-12-03T23:29:07,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742090_1266 (size=466) 2024-12-03T23:29:07,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742090_1266 (size=466) 2024-12-03T23:29:07,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742090_1266 (size=466) 2024-12-03T23:29:07,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742091_1267 (size=31749) 
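The block writes and container cleanup in this stretch of the log are produced by the MapReduce job that ExportSnapshot launches to copy the snapshot's hfiles to the export location. A minimal sketch of driving the same tool programmatically, using the snapshot name from the log; the destination URI and mapper count are assumptions, and the tool is more commonly invoked from the command line as hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Copies the snapshot manifest and referenced hfiles to the target
    // filesystem root via a MapReduce job (placeholder destination URI).
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportFileSystemState",
        "-copy-to", "hdfs://namenode:8020/export-test",
        "-mappers", "4"
    });
    System.exit(rc);
  }
}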
2024-12-03T23:29:07,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742091_1267 (size=31749) 2024-12-03T23:29:07,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742091_1267 (size=31749) 2024-12-03T23:29:07,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742092_1268 (size=349702) 2024-12-03T23:29:07,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742092_1268 (size=349702) 2024-12-03T23:29:07,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742092_1268 (size=349702) 2024-12-03T23:29:08,448 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-03T23:29:08,886 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-03T23:29:08,887 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-12-03T23:29:08,899 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testExportFileSystemState 2024-12-03T23:29:08,899 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-03T23:29:08,900 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-03T23:29:08,900 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-919726820_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/snaptb0-testExportFileSystemState at hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-12-03T23:29:08,901 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/snaptb0-testExportFileSystemState/.snapshotinfo 2024-12-03T23:29:08,901 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/snaptb0-testExportFileSystemState/data.manifest 2024-12-03T23:29:08,901 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-919726820_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268528750/.hbase-snapshot/snaptb0-testExportFileSystemState at hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268528750/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-12-03T23:29:08,903 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268528750/.hbase-snapshot/snaptb0-testExportFileSystemState/.snapshotinfo 2024-12-03T23:29:08,903 DEBUG [Time-limited test {}] 
snapshot.TestExportSnapshot(500): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268528750/.hbase-snapshot/snaptb0-testExportFileSystemState/data.manifest 2024-12-03T23:29:08,948 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemState 2024-12-03T23:29:08,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] procedure2.ProcedureExecutor(1139): Stored pid=107, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemState 2024-12-03T23:29:08,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=107 2024-12-03T23:29:08,955 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733268548955"}]},"ts":"1733268548955"} 2024-12-03T23:29:08,960 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=DISABLING in hbase:meta 2024-12-03T23:29:08,960 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemState to state=DISABLING 2024-12-03T23:29:08,961 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=108, ppid=107, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemState}] 2024-12-03T23:29:08,963 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=109, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=efbe1e8e42a250871665f21f12ab1262, UNASSIGN}, {pid=110, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=6d9af970788ccb97e8ac8580406838a5, UNASSIGN}] 2024-12-03T23:29:08,964 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=110, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=6d9af970788ccb97e8ac8580406838a5, UNASSIGN 2024-12-03T23:29:08,965 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=109, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=efbe1e8e42a250871665f21f12ab1262, UNASSIGN 2024-12-03T23:29:08,966 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=109 updating hbase:meta row=efbe1e8e42a250871665f21f12ab1262, regionState=CLOSING, regionLocation=ed8014902436,41121,1733268370525 2024-12-03T23:29:08,966 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=110 updating hbase:meta row=6d9af970788ccb97e8ac8580406838a5, regionState=CLOSING, regionLocation=ed8014902436,39085,1733268370401 2024-12-03T23:29:08,968 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=109, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=efbe1e8e42a250871665f21f12ab1262, UNASSIGN because future has completed 
2024-12-03T23:29:08,969 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-03T23:29:08,969 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=111, ppid=109, state=RUNNABLE, hasLock=false; CloseRegionProcedure efbe1e8e42a250871665f21f12ab1262, server=ed8014902436,41121,1733268370525}] 2024-12-03T23:29:08,969 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=110, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=6d9af970788ccb97e8ac8580406838a5, UNASSIGN because future has completed 2024-12-03T23:29:08,970 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-03T23:29:08,970 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=112, ppid=110, state=RUNNABLE, hasLock=false; CloseRegionProcedure 6d9af970788ccb97e8ac8580406838a5, server=ed8014902436,39085,1733268370401}] 2024-12-03T23:29:09,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=107 2024-12-03T23:29:09,122 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] handler.UnassignRegionHandler(122): Close efbe1e8e42a250871665f21f12ab1262 2024-12-03T23:29:09,122 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-03T23:29:09,123 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1722): Closing efbe1e8e42a250871665f21f12ab1262, disabling compactions & flushes 2024-12-03T23:29:09,123 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,,1733268527384.efbe1e8e42a250871665f21f12ab1262. 2024-12-03T23:29:09,123 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,,1733268527384.efbe1e8e42a250871665f21f12ab1262. 2024-12-03T23:29:09,123 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,,1733268527384.efbe1e8e42a250871665f21f12ab1262. after waiting 0 ms 2024-12-03T23:29:09,123 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,,1733268527384.efbe1e8e42a250871665f21f12ab1262. 
2024-12-03T23:29:09,124 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] handler.UnassignRegionHandler(122): Close 6d9af970788ccb97e8ac8580406838a5 2024-12-03T23:29:09,124 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-03T23:29:09,124 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1722): Closing 6d9af970788ccb97e8ac8580406838a5, disabling compactions & flushes 2024-12-03T23:29:09,124 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,1,1733268527384.6d9af970788ccb97e8ac8580406838a5. 2024-12-03T23:29:09,124 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,1,1733268527384.6d9af970788ccb97e8ac8580406838a5. 2024-12-03T23:29:09,124 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,1,1733268527384.6d9af970788ccb97e8ac8580406838a5. after waiting 0 ms 2024-12-03T23:29:09,124 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,1,1733268527384.6d9af970788ccb97e8ac8580406838a5. 2024-12-03T23:29:09,203 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemState/efbe1e8e42a250871665f21f12ab1262/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-03T23:29:09,204 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-03T23:29:09,205 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,,1733268527384.efbe1e8e42a250871665f21f12ab1262. 
2024-12-03T23:29:09,205 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1676): Region close journal for efbe1e8e42a250871665f21f12ab1262: Waiting for close lock at 1733268549122Running coprocessor pre-close hooks at 1733268549122Disabling compacts and flushes for region at 1733268549122Disabling writes for close at 1733268549123 (+1 ms)Writing region close event to WAL at 1733268549151 (+28 ms)Running coprocessor post-close hooks at 1733268549204 (+53 ms)Closed at 1733268549205 (+1 ms) 2024-12-03T23:29:09,209 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=109 updating hbase:meta row=efbe1e8e42a250871665f21f12ab1262, regionState=CLOSED 2024-12-03T23:29:09,213 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] handler.UnassignRegionHandler(157): Closed efbe1e8e42a250871665f21f12ab1262 2024-12-03T23:29:09,213 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=111, ppid=109, state=RUNNABLE, hasLock=false; CloseRegionProcedure efbe1e8e42a250871665f21f12ab1262, server=ed8014902436,41121,1733268370525 because future has completed 2024-12-03T23:29:09,218 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=111, resume processing ppid=109 2024-12-03T23:29:09,222 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=109, ppid=108, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=efbe1e8e42a250871665f21f12ab1262, UNASSIGN in 255 msec 2024-12-03T23:29:09,223 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=111, ppid=109, state=SUCCESS, hasLock=false; CloseRegionProcedure efbe1e8e42a250871665f21f12ab1262, server=ed8014902436,41121,1733268370525 in 245 msec 2024-12-03T23:29:09,231 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemState/6d9af970788ccb97e8ac8580406838a5/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-03T23:29:09,233 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-03T23:29:09,233 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,1,1733268527384.6d9af970788ccb97e8ac8580406838a5. 
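The procedure chain logged above (DisableTableProcedure pid=107 spawning CloseTableRegionsProcedure pid=108, which in turn unassigns and closes each region) is started by a single client call. A minimal sketch, assuming a reachable cluster and using the table name from the log:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableExample {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportFileSystemState");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Blocks until the master's DisableTableProcedure and its
      // region-close subprocedures have finished.
      admin.disableTable(table);
    }
  }
}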
2024-12-03T23:29:09,233 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1676): Region close journal for 6d9af970788ccb97e8ac8580406838a5: Waiting for close lock at 1733268549124Running coprocessor pre-close hooks at 1733268549124Disabling compacts and flushes for region at 1733268549124Disabling writes for close at 1733268549124Writing region close event to WAL at 1733268549152 (+28 ms)Running coprocessor post-close hooks at 1733268549232 (+80 ms)Closed at 1733268549233 (+1 ms) 2024-12-03T23:29:09,236 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] handler.UnassignRegionHandler(157): Closed 6d9af970788ccb97e8ac8580406838a5 2024-12-03T23:29:09,237 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=110 updating hbase:meta row=6d9af970788ccb97e8ac8580406838a5, regionState=CLOSED 2024-12-03T23:29:09,240 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=112, ppid=110, state=RUNNABLE, hasLock=false; CloseRegionProcedure 6d9af970788ccb97e8ac8580406838a5, server=ed8014902436,39085,1733268370401 because future has completed 2024-12-03T23:29:09,247 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=112, resume processing ppid=110 2024-12-03T23:29:09,247 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=112, ppid=110, state=SUCCESS, hasLock=false; CloseRegionProcedure 6d9af970788ccb97e8ac8580406838a5, server=ed8014902436,39085,1733268370401 in 272 msec 2024-12-03T23:29:09,257 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=110, resume processing ppid=108 2024-12-03T23:29:09,257 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=110, ppid=108, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=6d9af970788ccb97e8ac8580406838a5, UNASSIGN in 284 msec 2024-12-03T23:29:09,261 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=108, resume processing ppid=107 2024-12-03T23:29:09,261 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=108, ppid=107, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemState in 297 msec 2024-12-03T23:29:09,265 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733268549264"}]},"ts":"1733268549264"} 2024-12-03T23:29:09,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=107 2024-12-03T23:29:09,271 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=DISABLED in hbase:meta 2024-12-03T23:29:09,271 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemState to state=DISABLED 2024-12-03T23:29:09,277 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=107, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemState in 325 msec 2024-12-03T23:29:09,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=107 2024-12-03T23:29:09,580 INFO [RPCClient-NioEventLoopGroup-6-4 {}] 
client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemState completed 2024-12-03T23:29:09,581 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemState 2024-12-03T23:29:09,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] procedure2.ProcedureExecutor(1139): Stored pid=113, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-03T23:29:09,583 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=113, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-03T23:29:09,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemState 2024-12-03T23:29:09,589 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=113, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-03T23:29:09,594 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37351 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemState 2024-12-03T23:29:09,599 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemState/efbe1e8e42a250871665f21f12ab1262 2024-12-03T23:29:09,600 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemState/6d9af970788ccb97e8ac8580406838a5 2024-12-03T23:29:09,601 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemState/efbe1e8e42a250871665f21f12ab1262/cf, FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemState/efbe1e8e42a250871665f21f12ab1262/recovered.edits] 2024-12-03T23:29:09,607 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemState/efbe1e8e42a250871665f21f12ab1262/cf/4264a4d072f9489ab33c3ec0a2a8c56e to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testtb-testExportFileSystemState/efbe1e8e42a250871665f21f12ab1262/cf/4264a4d072f9489ab33c3ec0a2a8c56e 2024-12-03T23:29:09,607 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemState/6d9af970788ccb97e8ac8580406838a5/cf, FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemState/6d9af970788ccb97e8ac8580406838a5/recovered.edits] 2024-12-03T23:29:09,621 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemState/efbe1e8e42a250871665f21f12ab1262/recovered.edits/9.seqid to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testtb-testExportFileSystemState/efbe1e8e42a250871665f21f12ab1262/recovered.edits/9.seqid 2024-12-03T23:29:09,621 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemState/efbe1e8e42a250871665f21f12ab1262 2024-12-03T23:29:09,622 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemState/6d9af970788ccb97e8ac8580406838a5/cf/1cb86284c8a74059aaad5f443fd5924f to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testtb-testExportFileSystemState/6d9af970788ccb97e8ac8580406838a5/cf/1cb86284c8a74059aaad5f443fd5924f 2024-12-03T23:29:09,624 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemState/6d9af970788ccb97e8ac8580406838a5/recovered.edits/9.seqid to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testtb-testExportFileSystemState/6d9af970788ccb97e8ac8580406838a5/recovered.edits/9.seqid 2024-12-03T23:29:09,625 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemState/6d9af970788ccb97e8ac8580406838a5 2024-12-03T23:29:09,625 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemState regions 2024-12-03T23:29:09,626 DEBUG [PEWorker-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d 2024-12-03T23:29:09,627 DEBUG [PEWorker-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf] 2024-12-03T23:29:09,630 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/c4ca4238a0b923820dcc509a6f75849b202412030c2475deb8aa42f9950f1b70b82966b2_6d9af970788ccb97e8ac8580406838a5 to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/c4ca4238a0b923820dcc509a6f75849b202412030c2475deb8aa42f9950f1b70b82966b2_6d9af970788ccb97e8ac8580406838a5 2024-12-03T23:29:09,638 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/d41d8cd98f00b204e9800998ecf8427e2024120307a7d75576ab41ba807f8b0fb30f88b7_efbe1e8e42a250871665f21f12ab1262 to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/d41d8cd98f00b204e9800998ecf8427e2024120307a7d75576ab41ba807f8b0fb30f88b7_efbe1e8e42a250871665f21f12ab1262 2024-12-03T23:29:09,639 DEBUG [PEWorker-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d 2024-12-03T23:29:09,641 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=113, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-03T23:29:09,645 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemState from hbase:meta 2024-12-03T23:29:09,671 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-03T23:29:09,671 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-03T23:29:09,671 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-03T23:29:09,671 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-03T23:29:09,672 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-03T23:29:09,672 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-03T23:29:09,674 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-03T23:29:09,674 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-03T23:29:09,674 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemState' descriptor. 
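Once the region and MOB files are archived, DeleteTableProcedure removes the table's META rows, ACL znode and descriptor, and the test then deletes its snapshots. Client-side, that whole cleanup amounts to a few Admin calls; a minimal sketch under the same assumptions as above:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DeleteTableExample {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportFileSystemState");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // The table must already be disabled; this drives the DeleteTableProcedure
      // seen in the log (archive files, clear META, drop ACLs, remove descriptor).
      admin.deleteTable(table);
      // Snapshot cleanup performed at the end of the test.
      admin.deleteSnapshot("emptySnaptb0-testExportFileSystemState");
      admin.deleteSnapshot("snaptb0-testExportFileSystemState");
    }
  }
}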
2024-12-03T23:29:09,676 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=113, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-03T23:29:09,676 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemState' from region states. 2024-12-03T23:29:09,676 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState,,1733268527384.efbe1e8e42a250871665f21f12ab1262.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733268549676"}]},"ts":"9223372036854775807"} 2024-12-03T23:29:09,676 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState,1,1733268527384.6d9af970788ccb97e8ac8580406838a5.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733268549676"}]},"ts":"9223372036854775807"} 2024-12-03T23:29:09,678 INFO [PEWorker-4 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-03T23:29:09,678 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => efbe1e8e42a250871665f21f12ab1262, NAME => 'testtb-testExportFileSystemState,,1733268527384.efbe1e8e42a250871665f21f12ab1262.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 6d9af970788ccb97e8ac8580406838a5, NAME => 'testtb-testExportFileSystemState,1,1733268527384.6d9af970788ccb97e8ac8580406838a5.', STARTKEY => '1', ENDKEY => ''}] 2024-12-03T23:29:09,679 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemState' as deleted. 2024-12-03T23:29:09,679 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-03T23:29:09,679 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-03T23:29:09,679 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733268549679"}]},"ts":"9223372036854775807"} 2024-12-03T23:29:09,679 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-03T23:29:09,679 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:29:09,679 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:29:09,679 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, 
path=/hbase/acl 2024-12-03T23:29:09,679 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-03T23:29:09,679 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:29:09,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=113 2024-12-03T23:29:09,683 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemState state from META 2024-12-03T23:29:09,684 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=113, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-03T23:29:09,685 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=113, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemState in 103 msec 2024-12-03T23:29:09,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=113 2024-12-03T23:29:09,790 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemState 2024-12-03T23:29:09,793 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemState completed 2024-12-03T23:29:09,807 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemState" type: DISABLED 2024-12-03T23:29:09,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemState 2024-12-03T23:29:09,812 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemState" type: DISABLED 2024-12-03T23:29:09,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemState 2024-12-03T23:29:09,857 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportFileSystemState Thread=806 (was 799) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43607 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-3995 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) 
java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-919726820_22 at /127.0.0.1:49262 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1139930117_1 at /127.0.0.1:49252 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-919726820_22 at /127.0.0.1:34808 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 138559) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native 
Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (790252428) connection to localhost/127.0.0.1:43435 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1139930117_1 at /127.0.0.1:34772 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) 
java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37783 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43435 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-919726820_22 at /127.0.0.1:45760 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-13 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=813 (was 814), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=728 (was 692) - SystemLoadAverage LEAK? -, ProcessCount=23 (was 19) - ProcessCount LEAK? -, AvailableMemoryMB=2864 (was 1882) - AvailableMemoryMB LEAK? - 2024-12-03T23:29:09,858 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=806 is superior to 500 2024-12-03T23:29:09,884 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testConsecutiveExports Thread=806, OpenFileDescriptor=813, MaxFileDescriptor=1048576, SystemLoadAverage=728, ProcessCount=20, AvailableMemoryMB=2868 2024-12-03T23:29:09,884 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=806 is superior to 500 2024-12-03T23:29:09,886 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-03T23:29:09,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] procedure2.ProcedureExecutor(1139): Stored pid=114, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testConsecutiveExports 2024-12-03T23:29:09,888 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_PRE_OPERATION 2024-12-03T23:29:09,888 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testConsecutiveExports" procId is: 114 2024-12-03T23:29:09,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see 
if procedure is done pid=114 2024-12-03T23:29:09,890 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-03T23:29:09,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742093_1269 (size=440) 2024-12-03T23:29:09,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742093_1269 (size=440) 2024-12-03T23:29:09,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742093_1269 (size=440) 2024-12-03T23:29:09,906 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => b6ea905c40d950a4163b3b67c8be765a, NAME => 'testtb-testConsecutiveExports,,1733268549885.b6ea905c40d950a4163b3b67c8be765a.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b 2024-12-03T23:29:09,907 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => bbd2a25fcf245a74ce51ef1e4f5654f9, NAME => 'testtb-testConsecutiveExports,1,1733268549885.bbd2a25fcf245a74ce51ef1e4f5654f9.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b 2024-12-03T23:29:09,917 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState 2024-12-03T23:29:09,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742094_1270 (size=65) 2024-12-03T23:29:09,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742094_1270 (size=65) 2024-12-03T23:29:09,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742094_1270 (size=65) 2024-12-03T23:29:09,927 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(898): Instantiated 
testtb-testConsecutiveExports,,1733268549885.b6ea905c40d950a4163b3b67c8be765a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T23:29:09,927 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1722): Closing b6ea905c40d950a4163b3b67c8be765a, disabling compactions & flushes 2024-12-03T23:29:09,927 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,,1733268549885.b6ea905c40d950a4163b3b67c8be765a. 2024-12-03T23:29:09,927 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,,1733268549885.b6ea905c40d950a4163b3b67c8be765a. 2024-12-03T23:29:09,927 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,,1733268549885.b6ea905c40d950a4163b3b67c8be765a. after waiting 0 ms 2024-12-03T23:29:09,927 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,,1733268549885.b6ea905c40d950a4163b3b67c8be765a. 2024-12-03T23:29:09,927 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,,1733268549885.b6ea905c40d950a4163b3b67c8be765a. 2024-12-03T23:29:09,927 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1676): Region close journal for b6ea905c40d950a4163b3b67c8be765a: Waiting for close lock at 1733268549927Disabling compacts and flushes for region at 1733268549927Disabling writes for close at 1733268549927Writing region close event to WAL at 1733268549927Closed at 1733268549927 2024-12-03T23:29:09,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742095_1271 (size=65) 2024-12-03T23:29:09,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742095_1271 (size=65) 2024-12-03T23:29:09,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742095_1271 (size=65) 2024-12-03T23:29:09,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-12-03T23:29:10,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-12-03T23:29:10,340 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,1,1733268549885.bbd2a25fcf245a74ce51ef1e4f5654f9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T23:29:10,340 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1722): Closing bbd2a25fcf245a74ce51ef1e4f5654f9, disabling compactions & flushes 2024-12-03T23:29:10,340 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,1,1733268549885.bbd2a25fcf245a74ce51ef1e4f5654f9. 
2024-12-03T23:29:10,340 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,1,1733268549885.bbd2a25fcf245a74ce51ef1e4f5654f9. 2024-12-03T23:29:10,340 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,1,1733268549885.bbd2a25fcf245a74ce51ef1e4f5654f9. after waiting 0 ms 2024-12-03T23:29:10,340 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,1,1733268549885.bbd2a25fcf245a74ce51ef1e4f5654f9. 2024-12-03T23:29:10,340 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,1,1733268549885.bbd2a25fcf245a74ce51ef1e4f5654f9. 2024-12-03T23:29:10,340 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1676): Region close journal for bbd2a25fcf245a74ce51ef1e4f5654f9: Waiting for close lock at 1733268550340Disabling compacts and flushes for region at 1733268550340Disabling writes for close at 1733268550340Writing region close event to WAL at 1733268550340Closed at 1733268550340 2024-12-03T23:29:10,342 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_ADD_TO_META 2024-12-03T23:29:10,342 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testConsecutiveExports,,1733268549885.b6ea905c40d950a4163b3b67c8be765a.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733268550342"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733268550342"}]},"ts":"1733268550342"} 2024-12-03T23:29:10,342 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testConsecutiveExports,1,1733268549885.bbd2a25fcf245a74ce51ef1e4f5654f9.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733268550342"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733268550342"}]},"ts":"1733268550342"} 2024-12-03T23:29:10,346 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
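The entries above show the master executing CreateTableProcedure pid=114 for 'testtb-testConsecutiveExports': a two-region table (split at row key '1') with one MOB-enabled column family ('cf', IS_MOB => 'true', MOB_THRESHOLD => '0', so every cell value is written to MOB storage). As a minimal illustrative sketch only (the class name and the default-configuration lookup are assumptions, not code from this test), an equivalent request can be issued through the standard HBase Admin API:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Column family 'cf' mirrors the descriptor in the log: MOB enabled with a
      // zero-byte threshold, single version kept.
      ColumnFamilyDescriptorBuilder cf =
          ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMobEnabled(true)   // IS_MOB => 'true'
              .setMobThreshold(0L)   // MOB_THRESHOLD => '0'
              .setMaxVersions(1);    // VERSIONS => '1'
      // One split key ('1') yields the two regions seen above: ['', '1') and ['1', '').
      admin.createTable(
          TableDescriptorBuilder.newBuilder(TableName.valueOf("testtb-testConsecutiveExports"))
              .setColumnFamily(cf.build())
              .build(),
          new byte[][] { Bytes.toBytes("1") });
    }
  }
}

The master then walks the procedure states visible in the log (CREATE_TABLE_PRE_OPERATION, WRITE_FS_LAYOUT, ADD_TO_META, ASSIGN_REGIONS) while the client keeps polling with 'Checking to see if procedure is done'.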
2024-12-03T23:29:10,347 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-03T23:29:10,348 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733268550347"}]},"ts":"1733268550347"} 2024-12-03T23:29:10,351 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=ENABLING in hbase:meta 2024-12-03T23:29:10,352 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {ed8014902436=0} racks are {/default-rack=0} 2024-12-03T23:29:10,354 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-03T23:29:10,354 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-03T23:29:10,354 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-03T23:29:10,354 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-03T23:29:10,354 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-03T23:29:10,354 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-03T23:29:10,354 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-03T23:29:10,354 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-03T23:29:10,354 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-03T23:29:10,354 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-03T23:29:10,354 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=115, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=b6ea905c40d950a4163b3b67c8be765a, ASSIGN}, {pid=116, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=bbd2a25fcf245a74ce51ef1e4f5654f9, ASSIGN}] 2024-12-03T23:29:10,362 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=115, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=b6ea905c40d950a4163b3b67c8be765a, ASSIGN 2024-12-03T23:29:10,362 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=116, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=bbd2a25fcf245a74ce51ef1e4f5654f9, ASSIGN 2024-12-03T23:29:10,363 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=115, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=b6ea905c40d950a4163b3b67c8be765a, ASSIGN; state=OFFLINE, location=ed8014902436,41121,1733268370525; forceNewPlan=false, retain=false 2024-12-03T23:29:10,363 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=116, ppid=114, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=bbd2a25fcf245a74ce51ef1e4f5654f9, ASSIGN; state=OFFLINE, location=ed8014902436,39085,1733268370401; forceNewPlan=false, retain=false 2024-12-03T23:29:10,514 INFO [ed8014902436:43753 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-03T23:29:10,514 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=115 updating hbase:meta row=b6ea905c40d950a4163b3b67c8be765a, regionState=OPENING, regionLocation=ed8014902436,41121,1733268370525 2024-12-03T23:29:10,515 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=116 updating hbase:meta row=bbd2a25fcf245a74ce51ef1e4f5654f9, regionState=OPENING, regionLocation=ed8014902436,39085,1733268370401 2024-12-03T23:29:10,516 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=115, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=b6ea905c40d950a4163b3b67c8be765a, ASSIGN because future has completed 2024-12-03T23:29:10,517 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=117, ppid=115, state=RUNNABLE, hasLock=false; OpenRegionProcedure b6ea905c40d950a4163b3b67c8be765a, server=ed8014902436,41121,1733268370525}] 2024-12-03T23:29:10,517 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=116, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=bbd2a25fcf245a74ce51ef1e4f5654f9, ASSIGN because future has completed 2024-12-03T23:29:10,518 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=118, ppid=116, state=RUNNABLE, hasLock=false; OpenRegionProcedure bbd2a25fcf245a74ce51ef1e4f5654f9, server=ed8014902436,39085,1733268370401}] 2024-12-03T23:29:10,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-12-03T23:29:10,672 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] handler.AssignRegionHandler(132): Open testtb-testConsecutiveExports,1,1733268549885.bbd2a25fcf245a74ce51ef1e4f5654f9. 2024-12-03T23:29:10,672 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(7752): Opening region: {ENCODED => bbd2a25fcf245a74ce51ef1e4f5654f9, NAME => 'testtb-testConsecutiveExports,1,1733268549885.bbd2a25fcf245a74ce51ef1e4f5654f9.', STARTKEY => '1', ENDKEY => ''} 2024-12-03T23:29:10,673 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] handler.AssignRegionHandler(132): Open testtb-testConsecutiveExports,,1733268549885.b6ea905c40d950a4163b3b67c8be765a. 2024-12-03T23:29:10,673 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testConsecutiveExports,1,1733268549885.bbd2a25fcf245a74ce51ef1e4f5654f9. 
service=AccessControlService 2024-12-03T23:29:10,673 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(7752): Opening region: {ENCODED => b6ea905c40d950a4163b3b67c8be765a, NAME => 'testtb-testConsecutiveExports,,1733268549885.b6ea905c40d950a4163b3b67c8be765a.', STARTKEY => '', ENDKEY => '1'} 2024-12-03T23:29:10,673 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-03T23:29:10,673 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testConsecutiveExports,,1733268549885.b6ea905c40d950a4163b3b67c8be765a. service=AccessControlService 2024-12-03T23:29:10,673 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-03T23:29:10,673 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testConsecutiveExports bbd2a25fcf245a74ce51ef1e4f5654f9 2024-12-03T23:29:10,673 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,1,1733268549885.bbd2a25fcf245a74ce51ef1e4f5654f9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T23:29:10,673 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testConsecutiveExports b6ea905c40d950a4163b3b67c8be765a 2024-12-03T23:29:10,673 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,,1733268549885.b6ea905c40d950a4163b3b67c8be765a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T23:29:10,673 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(7794): checking encryption for bbd2a25fcf245a74ce51ef1e4f5654f9 2024-12-03T23:29:10,673 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(7794): checking encryption for b6ea905c40d950a4163b3b67c8be765a 2024-12-03T23:29:10,673 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(7797): checking classloading for bbd2a25fcf245a74ce51ef1e4f5654f9 2024-12-03T23:29:10,673 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(7797): checking classloading for b6ea905c40d950a4163b3b67c8be765a 2024-12-03T23:29:10,675 INFO [StoreOpener-bbd2a25fcf245a74ce51ef1e4f5654f9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 
bbd2a25fcf245a74ce51ef1e4f5654f9 2024-12-03T23:29:10,675 INFO [StoreOpener-b6ea905c40d950a4163b3b67c8be765a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region b6ea905c40d950a4163b3b67c8be765a 2024-12-03T23:29:10,677 INFO [StoreOpener-b6ea905c40d950a4163b3b67c8be765a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b6ea905c40d950a4163b3b67c8be765a columnFamilyName cf 2024-12-03T23:29:10,677 INFO [StoreOpener-bbd2a25fcf245a74ce51ef1e4f5654f9-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region bbd2a25fcf245a74ce51ef1e4f5654f9 columnFamilyName cf 2024-12-03T23:29:10,678 DEBUG [StoreOpener-b6ea905c40d950a4163b3b67c8be765a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:29:10,679 INFO [StoreOpener-b6ea905c40d950a4163b3b67c8be765a-1 {}] regionserver.HStore(327): Store=b6ea905c40d950a4163b3b67c8be765a/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T23:29:10,679 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1038): replaying wal for b6ea905c40d950a4163b3b67c8be765a 2024-12-03T23:29:10,680 DEBUG [StoreOpener-bbd2a25fcf245a74ce51ef1e4f5654f9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:29:10,680 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testConsecutiveExports/b6ea905c40d950a4163b3b67c8be765a 2024-12-03T23:29:10,680 INFO [StoreOpener-bbd2a25fcf245a74ce51ef1e4f5654f9-1 {}] regionserver.HStore(327): Store=bbd2a25fcf245a74ce51ef1e4f5654f9/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, 
encoding=NONE, compression=NONE 2024-12-03T23:29:10,680 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testConsecutiveExports/b6ea905c40d950a4163b3b67c8be765a 2024-12-03T23:29:10,681 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1038): replaying wal for bbd2a25fcf245a74ce51ef1e4f5654f9 2024-12-03T23:29:10,681 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1048): stopping wal replay for b6ea905c40d950a4163b3b67c8be765a 2024-12-03T23:29:10,681 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1060): Cleaning up temporary data for b6ea905c40d950a4163b3b67c8be765a 2024-12-03T23:29:10,681 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testConsecutiveExports/bbd2a25fcf245a74ce51ef1e4f5654f9 2024-12-03T23:29:10,683 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testConsecutiveExports/bbd2a25fcf245a74ce51ef1e4f5654f9 2024-12-03T23:29:10,683 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1048): stopping wal replay for bbd2a25fcf245a74ce51ef1e4f5654f9 2024-12-03T23:29:10,683 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1060): Cleaning up temporary data for bbd2a25fcf245a74ce51ef1e4f5654f9 2024-12-03T23:29:10,683 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1093): writing seq id for b6ea905c40d950a4163b3b67c8be765a 2024-12-03T23:29:10,687 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testConsecutiveExports/b6ea905c40d950a4163b3b67c8be765a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T23:29:10,688 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1093): writing seq id for bbd2a25fcf245a74ce51ef1e4f5654f9 2024-12-03T23:29:10,688 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1114): Opened b6ea905c40d950a4163b3b67c8be765a; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64197561, jitterRate=-0.04338179528713226}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-03T23:29:10,688 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1122): Running coprocessor post-open hooks for b6ea905c40d950a4163b3b67c8be765a 2024-12-03T23:29:10,689 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 
{event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1006): Region open journal for b6ea905c40d950a4163b3b67c8be765a: Running coprocessor pre-open hook at 1733268550673Writing region info on filesystem at 1733268550673Initializing all the Stores at 1733268550674 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733268550674Cleaning up temporary data from old regions at 1733268550681 (+7 ms)Running coprocessor post-open hooks at 1733268550688 (+7 ms)Region opened successfully at 1733268550689 (+1 ms) 2024-12-03T23:29:10,690 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testConsecutiveExports,,1733268549885.b6ea905c40d950a4163b3b67c8be765a., pid=117, masterSystemTime=1733268550669 2024-12-03T23:29:10,691 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testConsecutiveExports/bbd2a25fcf245a74ce51ef1e4f5654f9/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T23:29:10,692 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testConsecutiveExports,,1733268549885.b6ea905c40d950a4163b3b67c8be765a. 2024-12-03T23:29:10,692 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] handler.AssignRegionHandler(153): Opened testtb-testConsecutiveExports,,1733268549885.b6ea905c40d950a4163b3b67c8be765a. 
2024-12-03T23:29:10,692 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1114): Opened bbd2a25fcf245a74ce51ef1e4f5654f9; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73992215, jitterRate=0.10256992280483246}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-03T23:29:10,692 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1122): Running coprocessor post-open hooks for bbd2a25fcf245a74ce51ef1e4f5654f9 2024-12-03T23:29:10,692 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1006): Region open journal for bbd2a25fcf245a74ce51ef1e4f5654f9: Running coprocessor pre-open hook at 1733268550673Writing region info on filesystem at 1733268550673Initializing all the Stores at 1733268550674 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733268550674Cleaning up temporary data from old regions at 1733268550683 (+9 ms)Running coprocessor post-open hooks at 1733268550692 (+9 ms)Region opened successfully at 1733268550692 2024-12-03T23:29:10,693 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=115 updating hbase:meta row=b6ea905c40d950a4163b3b67c8be765a, regionState=OPEN, openSeqNum=2, regionLocation=ed8014902436,41121,1733268370525 2024-12-03T23:29:10,693 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testConsecutiveExports,1,1733268549885.bbd2a25fcf245a74ce51ef1e4f5654f9., pid=118, masterSystemTime=1733268550670 2024-12-03T23:29:10,694 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=117, ppid=115, state=RUNNABLE, hasLock=false; OpenRegionProcedure b6ea905c40d950a4163b3b67c8be765a, server=ed8014902436,41121,1733268370525 because future has completed 2024-12-03T23:29:10,694 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testConsecutiveExports,1,1733268549885.bbd2a25fcf245a74ce51ef1e4f5654f9. 2024-12-03T23:29:10,695 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] handler.AssignRegionHandler(153): Opened testtb-testConsecutiveExports,1,1733268549885.bbd2a25fcf245a74ce51ef1e4f5654f9. 
2024-12-03T23:29:10,695 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=116 updating hbase:meta row=bbd2a25fcf245a74ce51ef1e4f5654f9, regionState=OPEN, openSeqNum=2, regionLocation=ed8014902436,39085,1733268370401 2024-12-03T23:29:10,703 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=118, ppid=116, state=RUNNABLE, hasLock=false; OpenRegionProcedure bbd2a25fcf245a74ce51ef1e4f5654f9, server=ed8014902436,39085,1733268370401 because future has completed 2024-12-03T23:29:10,706 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=117, resume processing ppid=115 2024-12-03T23:29:10,706 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=117, ppid=115, state=SUCCESS, hasLock=false; OpenRegionProcedure b6ea905c40d950a4163b3b67c8be765a, server=ed8014902436,41121,1733268370525 in 187 msec 2024-12-03T23:29:10,707 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=118, resume processing ppid=116 2024-12-03T23:29:10,707 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=115, ppid=114, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=b6ea905c40d950a4163b3b67c8be765a, ASSIGN in 352 msec 2024-12-03T23:29:10,707 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=118, ppid=116, state=SUCCESS, hasLock=false; OpenRegionProcedure bbd2a25fcf245a74ce51ef1e4f5654f9, server=ed8014902436,39085,1733268370401 in 187 msec 2024-12-03T23:29:10,710 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=116, resume processing ppid=114 2024-12-03T23:29:10,710 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=116, ppid=114, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=bbd2a25fcf245a74ce51ef1e4f5654f9, ASSIGN in 353 msec 2024-12-03T23:29:10,711 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-03T23:29:10,711 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733268550711"}]},"ts":"1733268550711"} 2024-12-03T23:29:10,713 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=ENABLED in hbase:meta 2024-12-03T23:29:10,714 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_POST_OPERATION 2024-12-03T23:29:10,714 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testConsecutiveExports jenkins: RWXCA 2024-12-03T23:29:10,717 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37351 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-12-03T23:29:10,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:29:10,736 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:29:10,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:29:10,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:29:10,745 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-03T23:29:10,745 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-03T23:29:10,746 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-03T23:29:10,746 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-03T23:29:10,747 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=114, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testConsecutiveExports in 860 msec 2024-12-03T23:29:11,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-12-03T23:29:11,029 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testConsecutiveExports completed 2024-12-03T23:29:11,029 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-03T23:29:11,032 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testConsecutiveExports 2024-12-03T23:29:11,032 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testConsecutiveExports,,1733268549885.b6ea905c40d950a4163b3b67c8be765a. 
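Once the table is enabled, CREATE_TABLE_POST_OPERATION writes the creator's full permission set ('jenkins: RWXCA') to hbase:acl, and the NodeChildrenChanged events on /hbase/acl above show each region server's ZKPermissionWatcher refreshing its permission cache. For context only, this is the same table-wide grant a client could make explicitly; the sketch below is an assumption-laden illustration (the class name and default configuration are mine), not code from the test:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantAclSketch {
  public static void main(String[] args) throws Throwable {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
      // RWXCA = READ, WRITE, EXEC, CREATE, ADMIN; null family/qualifier makes the grant table-wide.
      AccessControlClient.grant(conn,
          TableName.valueOf("testtb-testConsecutiveExports"), "jenkins",
          null, null,
          Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
          Permission.Action.CREATE, Permission.Action.ADMIN);
    }
  }
}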
2024-12-03T23:29:11,032 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-03T23:29:11,034 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-03T23:29:11,038 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-03T23:29:11,044 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-03T23:29:11,047 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-03T23:29:11,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733268551047 (current time:1733268551047). 2024-12-03T23:29:11,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-03T23:29:11,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testConsecutiveExports VERSION not specified, setting to 2 2024-12-03T23:29:11,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-03T23:29:11,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f52224, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:29:11,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] client.ClusterIdFetcher(90): Going to request ed8014902436,43753,-1 for getting cluster id 2024-12-03T23:29:11,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T23:29:11,048 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '365bf15c-9cbe-4838-a767-ac7274dcdbe4' 2024-12-03T23:29:11,048 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T23:29:11,049 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "365bf15c-9cbe-4838-a767-ac7274dcdbe4" 2024-12-03T23:29:11,049 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7a05c1e1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 
2024-12-03T23:29:11,049 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [ed8014902436,43753,-1] 2024-12-03T23:29:11,049 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T23:29:11,049 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:29:11,050 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51960, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T23:29:11,050 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6c8c64e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:29:11,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T23:29:11,052 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ed8014902436,37351,1733268370600, seqNum=-1] 2024-12-03T23:29:11,052 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T23:29:11,053 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45466, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T23:29:11,054 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753. 
2024-12-03T23:29:11,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-03T23:29:11,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:29:11,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:29:11,055 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-03T23:29:11,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@610bcfb4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:29:11,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] client.ClusterIdFetcher(90): Going to request ed8014902436,43753,-1 for getting cluster id 2024-12-03T23:29:11,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T23:29:11,056 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '365bf15c-9cbe-4838-a767-ac7274dcdbe4' 2024-12-03T23:29:11,056 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T23:29:11,057 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "365bf15c-9cbe-4838-a767-ac7274dcdbe4" 2024-12-03T23:29:11,057 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c8ad213, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:29:11,057 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [ed8014902436,43753,-1] 2024-12-03T23:29:11,057 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T23:29:11,057 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:29:11,058 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51968, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T23:29:11,058 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@542862bf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:29:11,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T23:29:11,060 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ed8014902436,37351,1733268370600, seqNum=-1] 2024-12-03T23:29:11,060 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T23:29:11,061 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45468, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T23:29:11,063 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testConsecutiveExports', locateType=CURRENT is [region=hbase:acl,,1733268373530.94ece278d222af9bae70064026dc24ae., hostname=ed8014902436,37351,1733268370600, seqNum=2] 2024-12-03T23:29:11,064 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753. 
2024-12-03T23:29:11,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor256.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-03T23:29:11,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:29:11,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:29:11,064 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-03T23:29:11,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-12-03T23:29:11,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-12-03T23:29:11,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] procedure2.ProcedureExecutor(1139): Stored pid=119, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-03T23:29:11,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 119 2024-12-03T23:29:11,067 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-03T23:29:11,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-12-03T23:29:11,068 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-03T23:29:11,070 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-03T23:29:11,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742096_1272 (size=161) 2024-12-03T23:29:11,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742096_1272 (size=161) 2024-12-03T23:29:11,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742096_1272 (size=161) 2024-12-03T23:29:11,077 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-03T23:29:11,077 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=120, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b6ea905c40d950a4163b3b67c8be765a}, {pid=121, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure bbd2a25fcf245a74ce51ef1e4f5654f9}] 2024-12-03T23:29:11,078 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=120, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b6ea905c40d950a4163b3b67c8be765a 2024-12-03T23:29:11,078 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=121, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure bbd2a25fcf245a74ce51ef1e4f5654f9 2024-12-03T23:29:11,179 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-12-03T23:29:11,229 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39085 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=121 2024-12-03T23:29:11,229 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41121 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=120 2024-12-03T23:29:11,230 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,,1733268549885.b6ea905c40d950a4163b3b67c8be765a. 2024-12-03T23:29:11,230 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,1,1733268549885.bbd2a25fcf245a74ce51ef1e4f5654f9. 2024-12-03T23:29:11,230 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.HRegion(2603): Flush status journal for b6ea905c40d950a4163b3b67c8be765a: 2024-12-03T23:29:11,230 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,,1733268549885.b6ea905c40d950a4163b3b67c8be765a. for emptySnaptb0-testConsecutiveExports completed. 2024-12-03T23:29:11,230 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.HRegion(2603): Flush status journal for bbd2a25fcf245a74ce51ef1e4f5654f9: 2024-12-03T23:29:11,230 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,1,1733268549885.bbd2a25fcf245a74ce51ef1e4f5654f9. for emptySnaptb0-testConsecutiveExports completed. 2024-12-03T23:29:11,230 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,,1733268549885.b6ea905c40d950a4163b3b67c8be765a.' region-info for snapshot=emptySnaptb0-testConsecutiveExports 2024-12-03T23:29:11,230 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-03T23:29:11,230 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-03T23:29:11,230 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,1,1733268549885.bbd2a25fcf245a74ce51ef1e4f5654f9.' 
region-info for snapshot=emptySnaptb0-testConsecutiveExports 2024-12-03T23:29:11,230 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-03T23:29:11,230 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-03T23:29:11,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742098_1274 (size=68) 2024-12-03T23:29:11,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742098_1274 (size=68) 2024-12-03T23:29:11,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742097_1273 (size=68) 2024-12-03T23:29:11,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742098_1274 (size=68) 2024-12-03T23:29:11,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742097_1273 (size=68) 2024-12-03T23:29:11,245 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,1,1733268549885.bbd2a25fcf245a74ce51ef1e4f5654f9. 2024-12-03T23:29:11,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742097_1273 (size=68) 2024-12-03T23:29:11,245 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=121 2024-12-03T23:29:11,245 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,,1733268549885.b6ea905c40d950a4163b3b67c8be765a. 
2024-12-03T23:29:11,245 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=120 2024-12-03T23:29:11,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.HMaster(4169): Remote procedure done, pid=121 2024-12-03T23:29:11,245 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testConsecutiveExports on region bbd2a25fcf245a74ce51ef1e4f5654f9 2024-12-03T23:29:11,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.HMaster(4169): Remote procedure done, pid=120 2024-12-03T23:29:11,246 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testConsecutiveExports on region b6ea905c40d950a4163b3b67c8be765a 2024-12-03T23:29:11,246 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=121, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure bbd2a25fcf245a74ce51ef1e4f5654f9 2024-12-03T23:29:11,246 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=120, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b6ea905c40d950a4163b3b67c8be765a 2024-12-03T23:29:11,247 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=120, ppid=119, state=SUCCESS, hasLock=false; SnapshotRegionProcedure b6ea905c40d950a4163b3b67c8be765a in 169 msec 2024-12-03T23:29:11,249 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=121, resume processing ppid=119 2024-12-03T23:29:11,249 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-03T23:29:11,249 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=121, ppid=119, state=SUCCESS, hasLock=false; SnapshotRegionProcedure bbd2a25fcf245a74ce51ef1e4f5654f9 in 169 msec 2024-12-03T23:29:11,249 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-03T23:29:11,250 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-03T23:29:11,250 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-03T23:29:11,251 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:29:11,251 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-03T23:29:11,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742099_1275 (size=60) 2024-12-03T23:29:11,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742099_1275 (size=60) 2024-12-03T23:29:11,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742099_1275 (size=60) 2024-12-03T23:29:11,258 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-03T23:29:11,258 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testConsecutiveExports 2024-12-03T23:29:11,259 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports 2024-12-03T23:29:11,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742100_1276 (size=641) 2024-12-03T23:29:11,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742100_1276 (size=641) 2024-12-03T23:29:11,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742100_1276 (size=641) 2024-12-03T23:29:11,270 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-03T23:29:11,275 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-03T23:29:11,275 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/emptySnaptb0-testConsecutiveExports 2024-12-03T23:29:11,277 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_POST_OPERATION, 
hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-03T23:29:11,277 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 119 2024-12-03T23:29:11,278 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=119, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } in 212 msec 2024-12-03T23:29:11,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-12-03T23:29:11,389 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testConsecutiveExports completed 2024-12-03T23:29:11,397 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41121 {}] regionserver.HRegion(8528): writing data to region testtb-testConsecutiveExports,,1733268549885.b6ea905c40d950a4163b3b67c8be765a. with WAL disabled. Data may be lost in the event of a crash. 2024-12-03T23:29:11,400 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39085 {}] regionserver.HRegion(8528): writing data to region testtb-testConsecutiveExports,1,1733268549885.bbd2a25fcf245a74ce51ef1e4f5654f9. with WAL disabled. Data may be lost in the event of a crash. 2024-12-03T23:29:11,401 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-03T23:29:11,404 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testConsecutiveExports 2024-12-03T23:29:11,404 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testConsecutiveExports,,1733268549885.b6ea905c40d950a4163b3b67c8be765a. 
2024-12-03T23:29:11,404 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-03T23:29:11,406 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-03T23:29:11,410 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-03T23:29:11,417 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-03T23:29:11,421 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-03T23:29:11,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733268551421 (current time:1733268551421). 2024-12-03T23:29:11,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-03T23:29:11,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testConsecutiveExports VERSION not specified, setting to 2 2024-12-03T23:29:11,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-03T23:29:11,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@55202c58, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:29:11,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.ClusterIdFetcher(90): Going to request ed8014902436,43753,-1 for getting cluster id 2024-12-03T23:29:11,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T23:29:11,422 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '365bf15c-9cbe-4838-a767-ac7274dcdbe4' 2024-12-03T23:29:11,422 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T23:29:11,423 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "365bf15c-9cbe-4838-a767-ac7274dcdbe4" 2024-12-03T23:29:11,423 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c34a7d3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 
2024-12-03T23:29:11,423 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [ed8014902436,43753,-1] 2024-12-03T23:29:11,423 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T23:29:11,423 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:29:11,424 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51996, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T23:29:11,424 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@26d28904, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:29:11,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T23:29:11,426 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ed8014902436,37351,1733268370600, seqNum=-1] 2024-12-03T23:29:11,426 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T23:29:11,427 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45484, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T23:29:11,428 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753. 
2024-12-03T23:29:11,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-03T23:29:11,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:29:11,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:29:11,428 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-03T23:29:11,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f7ebca4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:29:11,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.ClusterIdFetcher(90): Going to request ed8014902436,43753,-1 for getting cluster id 2024-12-03T23:29:11,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T23:29:11,430 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '365bf15c-9cbe-4838-a767-ac7274dcdbe4' 2024-12-03T23:29:11,430 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T23:29:11,430 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "365bf15c-9cbe-4838-a767-ac7274dcdbe4" 2024-12-03T23:29:11,430 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@280d4bf9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:29:11,430 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [ed8014902436,43753,-1] 2024-12-03T23:29:11,431 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T23:29:11,431 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:29:11,431 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52006, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T23:29:11,432 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@34ec5a2c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:29:11,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T23:29:11,433 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ed8014902436,37351,1733268370600, seqNum=-1] 2024-12-03T23:29:11,433 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T23:29:11,434 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45490, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T23:29:11,436 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testConsecutiveExports', locateType=CURRENT is [region=hbase:acl,,1733268373530.94ece278d222af9bae70064026dc24ae., hostname=ed8014902436,37351,1733268370600, seqNum=2] 2024-12-03T23:29:11,438 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753. 
2024-12-03T23:29:11,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor256.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-03T23:29:11,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:29:11,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:29:11,438 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-03T23:29:11,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-12-03T23:29:11,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-12-03T23:29:11,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure2.ProcedureExecutor(1139): Stored pid=122, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-03T23:29:11,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 122 2024-12-03T23:29:11,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-12-03T23:29:11,441 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-03T23:29:11,442 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-03T23:29:11,444 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-03T23:29:11,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742101_1277 (size=156) 2024-12-03T23:29:11,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742101_1277 (size=156) 2024-12-03T23:29:11,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742101_1277 (size=156) 2024-12-03T23:29:11,451 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-03T23:29:11,452 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=123, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b6ea905c40d950a4163b3b67c8be765a}, {pid=124, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure bbd2a25fcf245a74ce51ef1e4f5654f9}] 2024-12-03T23:29:11,453 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=124, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure bbd2a25fcf245a74ce51ef1e4f5654f9 2024-12-03T23:29:11,453 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=123, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b6ea905c40d950a4163b3b67c8be765a 2024-12-03T23:29:11,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 
{}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-12-03T23:29:11,604 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41121 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=123 2024-12-03T23:29:11,604 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39085 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=124 2024-12-03T23:29:11,605 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,,1733268549885.b6ea905c40d950a4163b3b67c8be765a. 2024-12-03T23:29:11,605 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,1,1733268549885.bbd2a25fcf245a74ce51ef1e4f5654f9. 2024-12-03T23:29:11,605 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HRegion(2902): Flushing bbd2a25fcf245a74ce51ef1e4f5654f9 1/1 column families, dataSize=2.80 KB heapSize=6.30 KB 2024-12-03T23:29:11,605 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegion(2902): Flushing b6ea905c40d950a4163b3b67c8be765a 1/1 column families, dataSize=467 B heapSize=1.23 KB 2024-12-03T23:29:11,620 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203fa95a7ec89724ae2be61262361f3aa4c_b6ea905c40d950a4163b3b67c8be765a is 71, key is 01a0dc64bcd51614eb716b53967daa08/cf:q/1733268551397/Put/seqid=0 2024-12-03T23:29:11,620 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241203165485d9c2104feeaa67645f5dfd7e33_bbd2a25fcf245a74ce51ef1e4f5654f9 is 71, key is 1cfdff1d458f3368de593cc5797c60cd/cf:q/1733268551399/Put/seqid=0 2024-12-03T23:29:11,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742102_1278 (size=5381) 2024-12-03T23:29:11,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742103_1279 (size=7892) 2024-12-03T23:29:11,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742103_1279 (size=7892) 2024-12-03T23:29:11,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742102_1278 (size=5381) 2024-12-03T23:29:11,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742102_1278 (size=5381) 2024-12-03T23:29:11,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to 
blk_1073742103_1279 (size=7892) 2024-12-03T23:29:11,631 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:29:11,631 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:29:11,635 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241203165485d9c2104feeaa67645f5dfd7e33_bbd2a25fcf245a74ce51ef1e4f5654f9 to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/c4ca4238a0b923820dcc509a6f75849b20241203165485d9c2104feeaa67645f5dfd7e33_bbd2a25fcf245a74ce51ef1e4f5654f9 2024-12-03T23:29:11,635 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203fa95a7ec89724ae2be61262361f3aa4c_b6ea905c40d950a4163b3b67c8be765a to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/d41d8cd98f00b204e9800998ecf8427e20241203fa95a7ec89724ae2be61262361f3aa4c_b6ea905c40d950a4163b3b67c8be765a 2024-12-03T23:29:11,636 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testConsecutiveExports/b6ea905c40d950a4163b3b67c8be765a/.tmp/cf/9b391e1efe56481596f33ec0141248aa, store: [table=testtb-testConsecutiveExports family=cf region=b6ea905c40d950a4163b3b67c8be765a] 2024-12-03T23:29:11,636 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testConsecutiveExports/bbd2a25fcf245a74ce51ef1e4f5654f9/.tmp/cf/cf9df996ba0244b5b938e9234eb0fc4a, store: [table=testtb-testConsecutiveExports family=cf region=bbd2a25fcf245a74ce51ef1e4f5654f9] 2024-12-03T23:29:11,636 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testConsecutiveExports/bbd2a25fcf245a74ce51ef1e4f5654f9/.tmp/cf/cf9df996ba0244b5b938e9234eb0fc4a is 206, key is 14cdc0db0fb577a8898a9040c52df0b77/cf:q/1733268551399/Put/seqid=0 2024-12-03T23:29:11,636 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testConsecutiveExports/b6ea905c40d950a4163b3b67c8be765a/.tmp/cf/9b391e1efe56481596f33ec0141248aa is 206, key is 091606427f8ee0ffb1456c155a8030761/cf:q/1733268551397/Put/seqid=0 2024-12-03T23:29:11,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742105_1281 (size=6720) 2024-12-03T23:29:11,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742105_1281 (size=6720) 2024-12-03T23:29:11,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742105_1281 (size=6720) 2024-12-03T23:29:11,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742104_1280 (size=14047) 2024-12-03T23:29:11,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742104_1280 (size=14047) 2024-12-03T23:29:11,642 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=467, hasBloomFilter=true, into tmp file hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testConsecutiveExports/b6ea905c40d950a4163b3b67c8be765a/.tmp/cf/9b391e1efe56481596f33ec0141248aa 2024-12-03T23:29:11,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742104_1280 (size=14047) 2024-12-03T23:29:11,643 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=2.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testConsecutiveExports/bbd2a25fcf245a74ce51ef1e4f5654f9/.tmp/cf/cf9df996ba0244b5b938e9234eb0fc4a 2024-12-03T23:29:11,647 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testConsecutiveExports/b6ea905c40d950a4163b3b67c8be765a/.tmp/cf/9b391e1efe56481596f33ec0141248aa as hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testConsecutiveExports/b6ea905c40d950a4163b3b67c8be765a/cf/9b391e1efe56481596f33ec0141248aa 2024-12-03T23:29:11,648 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testConsecutiveExports/bbd2a25fcf245a74ce51ef1e4f5654f9/.tmp/cf/cf9df996ba0244b5b938e9234eb0fc4a as hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testConsecutiveExports/bbd2a25fcf245a74ce51ef1e4f5654f9/cf/cf9df996ba0244b5b938e9234eb0fc4a 2024-12-03T23:29:11,652 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testConsecutiveExports/b6ea905c40d950a4163b3b67c8be765a/cf/9b391e1efe56481596f33ec0141248aa, entries=7, sequenceid=6, filesize=6.6 K 2024-12-03T23:29:11,652 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testConsecutiveExports/bbd2a25fcf245a74ce51ef1e4f5654f9/cf/cf9df996ba0244b5b938e9234eb0fc4a, entries=43, sequenceid=6, filesize=13.7 K 2024-12-03T23:29:11,653 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegion(3140): Finished flush of dataSize ~467 B/467, heapSize ~1.22 KB/1248, currentSize=0 B/0 for b6ea905c40d950a4163b3b67c8be765a in 48ms, sequenceid=6, compaction requested=false 2024-12-03T23:29:11,653 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testConsecutiveExports' 2024-12-03T23:29:11,653 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HRegion(3140): Finished flush of dataSize ~2.80 KB/2869, heapSize ~6.28 KB/6432, currentSize=0 B/0 for bbd2a25fcf245a74ce51ef1e4f5654f9 in 48ms, sequenceid=6, compaction requested=false 2024-12-03T23:29:11,653 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testConsecutiveExports' 2024-12-03T23:29:11,653 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegion(2603): Flush status journal for b6ea905c40d950a4163b3b67c8be765a: 2024-12-03T23:29:11,653 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HRegion(2603): Flush status journal for bbd2a25fcf245a74ce51ef1e4f5654f9: 2024-12-03T23:29:11,653 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,,1733268549885.b6ea905c40d950a4163b3b67c8be765a. for snaptb0-testConsecutiveExports completed. 2024-12-03T23:29:11,653 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,1,1733268549885.bbd2a25fcf245a74ce51ef1e4f5654f9. for snaptb0-testConsecutiveExports completed. 2024-12-03T23:29:11,653 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,,1733268549885.b6ea905c40d950a4163b3b67c8be765a.' 
region-info for snapshot=snaptb0-testConsecutiveExports 2024-12-03T23:29:11,653 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-03T23:29:11,653 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,1,1733268549885.bbd2a25fcf245a74ce51ef1e4f5654f9.' region-info for snapshot=snaptb0-testConsecutiveExports 2024-12-03T23:29:11,653 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testConsecutiveExports/b6ea905c40d950a4163b3b67c8be765a/cf/9b391e1efe56481596f33ec0141248aa] hfiles 2024-12-03T23:29:11,654 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-03T23:29:11,654 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testConsecutiveExports/b6ea905c40d950a4163b3b67c8be765a/cf/9b391e1efe56481596f33ec0141248aa for snapshot=snaptb0-testConsecutiveExports 2024-12-03T23:29:11,654 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testConsecutiveExports/bbd2a25fcf245a74ce51ef1e4f5654f9/cf/cf9df996ba0244b5b938e9234eb0fc4a] hfiles 2024-12-03T23:29:11,654 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testConsecutiveExports/bbd2a25fcf245a74ce51ef1e4f5654f9/cf/cf9df996ba0244b5b938e9234eb0fc4a for snapshot=snaptb0-testConsecutiveExports 2024-12-03T23:29:11,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742107_1283 (size=107) 2024-12-03T23:29:11,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742107_1283 (size=107) 2024-12-03T23:29:11,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742107_1283 (size=107) 2024-12-03T23:29:11,665 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,,1733268549885.b6ea905c40d950a4163b3b67c8be765a. 
2024-12-03T23:29:11,665 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=123 2024-12-03T23:29:11,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.HMaster(4169): Remote procedure done, pid=123 2024-12-03T23:29:11,666 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testConsecutiveExports on region b6ea905c40d950a4163b3b67c8be765a 2024-12-03T23:29:11,666 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=123, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b6ea905c40d950a4163b3b67c8be765a 2024-12-03T23:29:11,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742106_1282 (size=107) 2024-12-03T23:29:11,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742106_1282 (size=107) 2024-12-03T23:29:11,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742106_1282 (size=107) 2024-12-03T23:29:11,668 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=123, ppid=122, state=SUCCESS, hasLock=false; SnapshotRegionProcedure b6ea905c40d950a4163b3b67c8be765a in 215 msec 2024-12-03T23:29:11,669 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,1,1733268549885.bbd2a25fcf245a74ce51ef1e4f5654f9. 
2024-12-03T23:29:11,669 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=124 2024-12-03T23:29:11,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.HMaster(4169): Remote procedure done, pid=124 2024-12-03T23:29:11,669 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testConsecutiveExports on region bbd2a25fcf245a74ce51ef1e4f5654f9 2024-12-03T23:29:11,669 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=124, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure bbd2a25fcf245a74ce51ef1e4f5654f9 2024-12-03T23:29:11,671 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=124, resume processing ppid=122 2024-12-03T23:29:11,671 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=124, ppid=122, state=SUCCESS, hasLock=false; SnapshotRegionProcedure bbd2a25fcf245a74ce51ef1e4f5654f9 in 218 msec 2024-12-03T23:29:11,671 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-03T23:29:11,672 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-03T23:29:11,673 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-03T23:29:11,673 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-03T23:29:11,673 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:29:11,674 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/c4ca4238a0b923820dcc509a6f75849b20241203165485d9c2104feeaa67645f5dfd7e33_bbd2a25fcf245a74ce51ef1e4f5654f9, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/d41d8cd98f00b204e9800998ecf8427e20241203fa95a7ec89724ae2be61262361f3aa4c_b6ea905c40d950a4163b3b67c8be765a] hfiles 2024-12-03T23:29:11,674 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/c4ca4238a0b923820dcc509a6f75849b20241203165485d9c2104feeaa67645f5dfd7e33_bbd2a25fcf245a74ce51ef1e4f5654f9 2024-12-03T23:29:11,674 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/d41d8cd98f00b204e9800998ecf8427e20241203fa95a7ec89724ae2be61262361f3aa4c_b6ea905c40d950a4163b3b67c8be765a 2024-12-03T23:29:11,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742108_1284 (size=291) 2024-12-03T23:29:11,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742108_1284 (size=291) 2024-12-03T23:29:11,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742108_1284 (size=291) 2024-12-03T23:29:11,687 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-03T23:29:11,687 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testConsecutiveExports 2024-12-03T23:29:11,688 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-03T23:29:11,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742109_1285 (size=951) 2024-12-03T23:29:11,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742109_1285 (size=951) 2024-12-03T23:29:11,701 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742109_1285 (size=951) 2024-12-03T23:29:11,704 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-03T23:29:11,711 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-03T23:29:11,711 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-03T23:29:11,713 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-03T23:29:11,713 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 122 2024-12-03T23:29:11,714 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=122, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } in 274 msec 2024-12-03T23:29:11,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-12-03T23:29:11,759 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testConsecutiveExports completed 2024-12-03T23:29:11,760 INFO [Time-limited test {}] snapshot.TestExportSnapshot(523): Local export destination path: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/local-export-1733268551759 2024-12-03T23:29:11,760 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/local-export-1733268551759, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/local-export-1733268551759, srcFsUri=hdfs://localhost:37331, srcDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b 2024-12-03T23:29:11,796 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:37331, inputRoot=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b 
2024-12-03T23:29:11,796 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=org.apache.hadoop.fs.LocalFileSystem@3a8441fb, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/local-export-1733268551759, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/local-export-1733268551759/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-03T23:29:11,797 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-03T23:29:11,801 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/snaptb0-testConsecutiveExports to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/local-export-1733268551759/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-03T23:29:11,826 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:29:11,826 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:29:11,827 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:29:12,679 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/hadoop-4605788858189057955.jar 2024-12-03T23:29:12,680 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:29:12,680 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:29:12,725 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_2/usercache/jenkins/appcache/application_1733268377251_0004/container_1733268377251_0004_01_000004/launch_container.sh] 2024-12-03T23:29:12,725 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_2/usercache/jenkins/appcache/application_1733268377251_0004/container_1733268377251_0004_01_000004/container_tokens] 2024-12-03T23:29:12,725 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_2/usercache/jenkins/appcache/application_1733268377251_0004/container_1733268377251_0004_01_000004/sysfs] 2024-12-03T23:29:12,735 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/hadoop-761599788055923369.jar 2024-12-03T23:29:12,736 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:29:12,736 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:29:12,736 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:29:12,736 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:29:12,737 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:29:12,737 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:29:12,737 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-03T23:29:12,738 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-03T23:29:12,738 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, 
using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-03T23:29:12,738 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-03T23:29:12,739 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-03T23:29:12,739 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-03T23:29:12,739 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-03T23:29:12,739 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-03T23:29:12,740 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-03T23:29:12,740 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-03T23:29:12,740 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-03T23:29:12,740 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-03T23:29:12,741 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-03T23:29:12,741 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-03T23:29:12,741 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-03T23:29:12,742 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-03T23:29:12,742 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-03T23:29:12,742 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-03T23:29:12,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742110_1286 (size=131440) 2024-12-03T23:29:12,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742110_1286 (size=131440) 2024-12-03T23:29:12,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742110_1286 (size=131440) 2024-12-03T23:29:12,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742111_1287 (size=4188619) 2024-12-03T23:29:12,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742111_1287 (size=4188619) 2024-12-03T23:29:12,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742111_1287 (size=4188619) 2024-12-03T23:29:12,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742112_1288 (size=1323991) 2024-12-03T23:29:12,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742112_1288 (size=1323991) 2024-12-03T23:29:12,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742112_1288 (size=1323991) 2024-12-03T23:29:12,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742113_1289 (size=903924) 2024-12-03T23:29:12,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742113_1289 (size=903924) 2024-12-03T23:29:12,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742113_1289 (size=903924) 2024-12-03T23:29:12,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742114_1290 (size=8360360) 2024-12-03T23:29:12,919 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742114_1290 (size=8360360) 2024-12-03T23:29:12,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742114_1290 (size=8360360) 2024-12-03T23:29:13,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742115_1291 (size=1877034) 2024-12-03T23:29:13,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742115_1291 (size=1877034) 2024-12-03T23:29:13,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742115_1291 (size=1877034) 2024-12-03T23:29:13,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742116_1292 (size=77835) 2024-12-03T23:29:13,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742116_1292 (size=77835) 2024-12-03T23:29:13,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742116_1292 (size=77835) 2024-12-03T23:29:13,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742117_1293 (size=30949) 2024-12-03T23:29:13,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742117_1293 (size=30949) 2024-12-03T23:29:13,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742117_1293 (size=30949) 2024-12-03T23:29:13,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742118_1294 (size=1597213) 2024-12-03T23:29:13,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742118_1294 (size=1597213) 2024-12-03T23:29:13,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742118_1294 (size=1597213) 2024-12-03T23:29:13,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742119_1295 (size=4695811) 2024-12-03T23:29:13,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742119_1295 (size=4695811) 2024-12-03T23:29:13,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742119_1295 (size=4695811) 2024-12-03T23:29:13,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742120_1296 (size=232957) 2024-12-03T23:29:13,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742120_1296 (size=232957) 2024-12-03T23:29:13,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742120_1296 (size=232957) 2024-12-03T23:29:13,421 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742121_1297 (size=127628) 2024-12-03T23:29:13,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742121_1297 (size=127628) 2024-12-03T23:29:13,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742121_1297 (size=127628) 2024-12-03T23:29:13,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742122_1298 (size=20406) 2024-12-03T23:29:13,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742122_1298 (size=20406) 2024-12-03T23:29:13,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742122_1298 (size=20406) 2024-12-03T23:29:13,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742123_1299 (size=5175431) 2024-12-03T23:29:13,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742123_1299 (size=5175431) 2024-12-03T23:29:13,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742123_1299 (size=5175431) 2024-12-03T23:29:13,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742124_1300 (size=217634) 2024-12-03T23:29:13,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742124_1300 (size=217634) 2024-12-03T23:29:13,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742124_1300 (size=217634) 2024-12-03T23:29:13,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742125_1301 (size=1832290) 2024-12-03T23:29:13,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742125_1301 (size=1832290) 2024-12-03T23:29:13,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742125_1301 (size=1832290) 2024-12-03T23:29:13,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742126_1302 (size=322274) 2024-12-03T23:29:13,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742126_1302 (size=322274) 2024-12-03T23:29:13,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742126_1302 (size=322274) 2024-12-03T23:29:13,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742127_1303 (size=503880) 2024-12-03T23:29:13,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742127_1303 (size=503880) 2024-12-03T23:29:13,494 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742127_1303 (size=503880) 2024-12-03T23:29:13,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742128_1304 (size=29229) 2024-12-03T23:29:13,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742128_1304 (size=29229) 2024-12-03T23:29:13,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742128_1304 (size=29229) 2024-12-03T23:29:13,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742129_1305 (size=24096) 2024-12-03T23:29:13,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742129_1305 (size=24096) 2024-12-03T23:29:13,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742129_1305 (size=24096) 2024-12-03T23:29:13,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742130_1306 (size=6424741) 2024-12-03T23:29:13,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742130_1306 (size=6424741) 2024-12-03T23:29:13,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742130_1306 (size=6424741) 2024-12-03T23:29:13,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742131_1307 (size=443172) 2024-12-03T23:29:13,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742131_1307 (size=443172) 2024-12-03T23:29:13,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742131_1307 (size=443172) 2024-12-03T23:29:13,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742132_1308 (size=111872) 2024-12-03T23:29:13,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742132_1308 (size=111872) 2024-12-03T23:29:13,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742132_1308 (size=111872) 2024-12-03T23:29:13,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742133_1309 (size=45609) 2024-12-03T23:29:13,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742133_1309 (size=45609) 2024-12-03T23:29:13,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742133_1309 (size=45609) 2024-12-03T23:29:13,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742134_1310 (size=136454) 2024-12-03T23:29:13,566 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742134_1310 (size=136454) 2024-12-03T23:29:13,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742134_1310 (size=136454) 2024-12-03T23:29:13,567 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-03T23:29:13,569 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list 2024-12-03T23:29:13,571 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=13.7 K 2024-12-03T23:29:13,572 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=7.7 K 2024-12-03T23:29:13,572 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=6.6 K 2024-12-03T23:29:13,572 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=5.3 K 2024-12-03T23:29:13,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742135_1311 (size=1023) 2024-12-03T23:29:13,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742135_1311 (size=1023) 2024-12-03T23:29:13,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742135_1311 (size=1023) 2024-12-03T23:29:13,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742136_1312 (size=35) 2024-12-03T23:29:13,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742136_1312 (size=35) 2024-12-03T23:29:13,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742136_1312 (size=35) 2024-12-03T23:29:13,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742137_1313 (size=304039) 2024-12-03T23:29:13,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742137_1313 (size=304039) 2024-12-03T23:29:13,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742137_1313 (size=304039) 2024-12-03T23:29:13,651 DEBUG [master/ed8014902436:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region bbd2a25fcf245a74ce51ef1e4f5654f9 changed from -1.0 to 0.0, refreshing cache 2024-12-03T23:29:13,651 DEBUG [master/ed8014902436:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region b6ea905c40d950a4163b3b67c8be765a changed from -1.0 to 0.0, refreshing cache 2024-12-03T23:29:13,886 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-03T23:29:13,886 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-03T23:29:13,890 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0004_000001 (auth:SIMPLE) from 127.0.0.1:51862 2024-12-03T23:29:13,900 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_3/usercache/jenkins/appcache/application_1733268377251_0004/container_1733268377251_0004_01_000001/launch_container.sh] 2024-12-03T23:29:13,900 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_3/usercache/jenkins/appcache/application_1733268377251_0004/container_1733268377251_0004_01_000001/container_tokens] 2024-12-03T23:29:13,900 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_3/usercache/jenkins/appcache/application_1733268377251_0004/container_1733268377251_0004_01_000001/sysfs] 2024-12-03T23:29:14,623 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0005_000001 (auth:SIMPLE) from 127.0.0.1:42276 2024-12-03T23:29:14,727 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-03T23:29:19,443 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0005_000001 (auth:SIMPLE) from 127.0.0.1:55572 2024-12-03T23:29:19,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742138_1314 (size=349737) 2024-12-03T23:29:19,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742138_1314 (size=349737) 2024-12-03T23:29:19,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742138_1314 (size=349737) 2024-12-03T23:29:19,918 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports 2024-12-03T23:29:19,918 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports Metrics about Tables on a single HBase RegionServer 2024-12-03T23:29:21,738 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0005_000001 (auth:SIMPLE) from 127.0.0.1:42286 2024-12-03T23:29:21,742 INFO 
[Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0005_000001 (auth:SIMPLE) from 127.0.0.1:51874 2024-12-03T23:29:22,554 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0005_000001 (auth:SIMPLE) from 127.0.0.1:57602 2024-12-03T23:29:22,580 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0005_000001 (auth:SIMPLE) from 127.0.0.1:57964 2024-12-03T23:29:24,890 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733268377251_0005_01_000006 while processing FINISH_CONTAINERS event 2024-12-03T23:29:28,271 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_3/usercache/jenkins/appcache/application_1733268377251_0005/container_1733268377251_0005_01_000002/launch_container.sh] 2024-12-03T23:29:28,271 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_3/usercache/jenkins/appcache/application_1733268377251_0005/container_1733268377251_0005_01_000002/container_tokens] 2024-12-03T23:29:28,271 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_3/usercache/jenkins/appcache/application_1733268377251_0005/container_1733268377251_0005_01_000002/sysfs] 2024-12-03T23:29:30,187 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_3/usercache/jenkins/appcache/application_1733268377251_0005/container_1733268377251_0005_01_000004/launch_container.sh] 2024-12-03T23:29:30,187 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_3/usercache/jenkins/appcache/application_1733268377251_0005/container_1733268377251_0005_01_000004/container_tokens] 2024-12-03T23:29:30,187 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_3/usercache/jenkins/appcache/application_1733268377251_0005/container_1733268377251_0005_01_000004/sysfs] 2024-12-03T23:29:30,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742139_1315 (size=31808) 2024-12-03T23:29:30,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is 
added to blk_1073742139_1315 (size=31808) 2024-12-03T23:29:30,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742139_1315 (size=31808) 2024-12-03T23:29:30,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742140_1316 (size=463) 2024-12-03T23:29:30,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742140_1316 (size=463) 2024-12-03T23:29:30,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742140_1316 (size=463) 2024-12-03T23:29:30,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742141_1317 (size=31808) 2024-12-03T23:29:30,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742141_1317 (size=31808) 2024-12-03T23:29:30,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742141_1317 (size=31808) 2024-12-03T23:29:30,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742142_1318 (size=349737) 2024-12-03T23:29:30,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742142_1318 (size=349737) 2024-12-03T23:29:30,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742142_1318 (size=349737) 2024-12-03T23:29:30,440 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0005_000001 (auth:SIMPLE) from 127.0.0.1:57970 2024-12-03T23:29:30,461 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0005_000001 (auth:SIMPLE) from 127.0.0.1:57610 2024-12-03T23:29:30,470 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0005_000001 (auth:SIMPLE) from 127.0.0.1:57972 2024-12-03T23:29:31,927 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-03T23:29:31,927 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
2024-12-03T23:29:31,959 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testConsecutiveExports 2024-12-03T23:29:31,960 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-03T23:29:31,960 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-03T23:29:31,960 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-919726820_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/snaptb0-testConsecutiveExports at hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-03T23:29:31,975 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-03T23:29:31,975 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-03T23:29:31,975 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in org.apache.hadoop.fs.LocalFileSystem@3a8441fb in root file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/local-export-1733268551759/.hbase-snapshot/snaptb0-testConsecutiveExports at file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/local-export-1733268551759/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-03T23:29:31,976 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/local-export-1733268551759/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-03T23:29:31,976 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/local-export-1733268551759/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-03T23:29:31,978 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/local-export-1733268551759, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/local-export-1733268551759, srcFsUri=hdfs://localhost:37331, srcDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b 2024-12-03T23:29:32,015 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:37331, inputRoot=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b 2024-12-03T23:29:32,015 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=org.apache.hadoop.fs.LocalFileSystem@3a8441fb, 
outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/local-export-1733268551759, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/local-export-1733268551759/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-03T23:29:32,055 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-03T23:29:32,085 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/snaptb0-testConsecutiveExports to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/local-export-1733268551759/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-03T23:29:32,125 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:29:32,125 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:29:32,125 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:29:33,246 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/hadoop-8636296632456748153.jar 2024-12-03T23:29:33,246 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:29:33,247 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:29:33,301 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/hadoop-14426619628817062047.jar 2024-12-03T23:29:33,301 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:29:33,301 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:29:33,302 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:29:33,302 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:29:33,302 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:29:33,302 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:29:33,302 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-03T23:29:33,302 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-03T23:29:33,303 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-03T23:29:33,303 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-03T23:29:33,303 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-03T23:29:33,303 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-03T23:29:33,303 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-03T23:29:33,304 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-03T23:29:33,304 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-03T23:29:33,304 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-03T23:29:33,304 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-03T23:29:33,304 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-03T23:29:33,304 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-03T23:29:33,305 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-03T23:29:33,305 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-03T23:29:33,305 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-03T23:29:33,305 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-03T23:29:33,305 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-03T23:29:33,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742143_1319 (size=131440) 2024-12-03T23:29:33,356 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742143_1319 (size=131440) 2024-12-03T23:29:33,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742143_1319 (size=131440) 2024-12-03T23:29:33,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742144_1320 (size=4188619) 2024-12-03T23:29:33,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742144_1320 (size=4188619) 2024-12-03T23:29:33,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742144_1320 (size=4188619) 2024-12-03T23:29:33,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742145_1321 (size=1323991) 2024-12-03T23:29:33,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742145_1321 (size=1323991) 2024-12-03T23:29:33,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742145_1321 (size=1323991) 2024-12-03T23:29:33,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742146_1322 (size=903924) 2024-12-03T23:29:33,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742146_1322 (size=903924) 2024-12-03T23:29:33,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742146_1322 (size=903924) 2024-12-03T23:29:33,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742147_1323 (size=8360360) 2024-12-03T23:29:33,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742147_1323 (size=8360360) 2024-12-03T23:29:33,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742147_1323 (size=8360360) 2024-12-03T23:29:33,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742148_1324 (size=1877034) 2024-12-03T23:29:33,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742148_1324 (size=1877034) 2024-12-03T23:29:33,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742148_1324 (size=1877034) 2024-12-03T23:29:33,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742149_1325 (size=77835) 2024-12-03T23:29:33,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742149_1325 (size=77835) 2024-12-03T23:29:33,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742149_1325 (size=77835) 2024-12-03T23:29:33,505 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742150_1326 (size=30949) 2024-12-03T23:29:33,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742150_1326 (size=30949) 2024-12-03T23:29:33,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742150_1326 (size=30949) 2024-12-03T23:29:33,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742151_1327 (size=1597213) 2024-12-03T23:29:33,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742151_1327 (size=1597213) 2024-12-03T23:29:33,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742151_1327 (size=1597213) 2024-12-03T23:29:33,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742152_1328 (size=4695811) 2024-12-03T23:29:33,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742152_1328 (size=4695811) 2024-12-03T23:29:33,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742152_1328 (size=4695811) 2024-12-03T23:29:33,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742153_1329 (size=232957) 2024-12-03T23:29:33,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742153_1329 (size=232957) 2024-12-03T23:29:33,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742153_1329 (size=232957) 2024-12-03T23:29:33,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742154_1330 (size=127628) 2024-12-03T23:29:33,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742154_1330 (size=127628) 2024-12-03T23:29:33,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742154_1330 (size=127628) 2024-12-03T23:29:33,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742155_1331 (size=20406) 2024-12-03T23:29:33,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742155_1331 (size=20406) 2024-12-03T23:29:33,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742155_1331 (size=20406) 2024-12-03T23:29:33,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742156_1332 (size=5175431) 2024-12-03T23:29:33,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742156_1332 (size=5175431) 2024-12-03T23:29:33,640 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742156_1332 (size=5175431) 2024-12-03T23:29:33,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742157_1333 (size=217634) 2024-12-03T23:29:33,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742157_1333 (size=217634) 2024-12-03T23:29:33,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742157_1333 (size=217634) 2024-12-03T23:29:33,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742158_1334 (size=1832290) 2024-12-03T23:29:33,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742158_1334 (size=1832290) 2024-12-03T23:29:33,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742158_1334 (size=1832290) 2024-12-03T23:29:33,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742159_1335 (size=322274) 2024-12-03T23:29:33,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742159_1335 (size=322274) 2024-12-03T23:29:33,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742159_1335 (size=322274) 2024-12-03T23:29:33,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742160_1336 (size=443172) 2024-12-03T23:29:33,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742160_1336 (size=443172) 2024-12-03T23:29:33,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742160_1336 (size=443172) 2024-12-03T23:29:33,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742161_1337 (size=503880) 2024-12-03T23:29:33,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742161_1337 (size=503880) 2024-12-03T23:29:33,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742161_1337 (size=503880) 2024-12-03T23:29:33,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742162_1338 (size=29229) 2024-12-03T23:29:33,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742162_1338 (size=29229) 2024-12-03T23:29:33,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742162_1338 (size=29229) 2024-12-03T23:29:33,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742163_1339 (size=24096) 2024-12-03T23:29:33,728 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742163_1339 (size=24096) 2024-12-03T23:29:33,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742163_1339 (size=24096) 2024-12-03T23:29:33,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742164_1340 (size=111872) 2024-12-03T23:29:33,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742164_1340 (size=111872) 2024-12-03T23:29:33,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742164_1340 (size=111872) 2024-12-03T23:29:33,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742165_1341 (size=6424741) 2024-12-03T23:29:33,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742165_1341 (size=6424741) 2024-12-03T23:29:33,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742165_1341 (size=6424741) 2024-12-03T23:29:33,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742166_1342 (size=45609) 2024-12-03T23:29:33,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742166_1342 (size=45609) 2024-12-03T23:29:33,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742166_1342 (size=45609) 2024-12-03T23:29:33,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742167_1343 (size=136454) 2024-12-03T23:29:33,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742167_1343 (size=136454) 2024-12-03T23:29:33,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742167_1343 (size=136454) 2024-12-03T23:29:33,821 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
2024-12-03T23:29:33,823 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list 2024-12-03T23:29:33,825 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=13.7 K 2024-12-03T23:29:33,825 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=7.7 K 2024-12-03T23:29:33,825 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=6.6 K 2024-12-03T23:29:33,825 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=5.3 K 2024-12-03T23:29:33,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742168_1344 (size=1023) 2024-12-03T23:29:33,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742168_1344 (size=1023) 2024-12-03T23:29:33,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742168_1344 (size=1023) 2024-12-03T23:29:33,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742169_1345 (size=35) 2024-12-03T23:29:33,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742169_1345 (size=35) 2024-12-03T23:29:33,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742169_1345 (size=35) 2024-12-03T23:29:33,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742170_1346 (size=304043) 2024-12-03T23:29:33,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742170_1346 (size=304043) 2024-12-03T23:29:33,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742170_1346 (size=304043) 2024-12-03T23:29:35,391 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_0/usercache/jenkins/appcache/application_1733268377251_0005/container_1733268377251_0005_01_000005/launch_container.sh] 2024-12-03T23:29:35,391 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_0/usercache/jenkins/appcache/application_1733268377251_0005/container_1733268377251_0005_01_000005/container_tokens] 2024-12-03T23:29:35,391 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_0/usercache/jenkins/appcache/application_1733268377251_0005/container_1733268377251_0005_01_000005/sysfs] 2024-12-03T23:29:35,435 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_0/usercache/jenkins/appcache/application_1733268377251_0005/container_1733268377251_0005_01_000003/launch_container.sh] 2024-12-03T23:29:35,435 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_0/usercache/jenkins/appcache/application_1733268377251_0005/container_1733268377251_0005_01_000003/container_tokens] 2024-12-03T23:29:35,435 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_0/usercache/jenkins/appcache/application_1733268377251_0005/container_1733268377251_0005_01_000003/sysfs] 2024-12-03T23:29:36,532 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-03T23:29:36,532 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-03T23:29:36,534 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0005_000001 (auth:SIMPLE) from 127.0.0.1:54610 2024-12-03T23:29:36,542 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_3/usercache/jenkins/appcache/application_1733268377251_0005/container_1733268377251_0005_01_000001/launch_container.sh] 2024-12-03T23:29:36,542 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_3/usercache/jenkins/appcache/application_1733268377251_0005/container_1733268377251_0005_01_000001/container_tokens] 2024-12-03T23:29:36,542 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_3/usercache/jenkins/appcache/application_1733268377251_0005/container_1733268377251_0005_01_000001/sysfs] 2024-12-03T23:29:37,325 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0006_000001 (auth:SIMPLE) from 127.0.0.1:33836 2024-12-03T23:29:38,448 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See 
HBASE-27595 for details. 2024-12-03T23:29:41,890 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0006_000001 (auth:SIMPLE) from 127.0.0.1:50022 2024-12-03T23:29:42,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742171_1347 (size=349741) 2024-12-03T23:29:42,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742171_1347 (size=349741) 2024-12-03T23:29:42,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742171_1347 (size=349741) 2024-12-03T23:29:44,169 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0006_000001 (auth:SIMPLE) from 127.0.0.1:50672 2024-12-03T23:29:44,169 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0006_000001 (auth:SIMPLE) from 127.0.0.1:37196 2024-12-03T23:29:44,996 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0006_000001 (auth:SIMPLE) from 127.0.0.1:50684 2024-12-03T23:29:45,028 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0006_000001 (auth:SIMPLE) from 127.0.0.1:37198 2024-12-03T23:29:47,535 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733268377251_0006_01_000006 while processing FINISH_CONTAINERS event 2024-12-03T23:29:49,312 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_2/usercache/jenkins/appcache/application_1733268377251_0006/container_1733268377251_0006_01_000002/launch_container.sh] 2024-12-03T23:29:49,312 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_2/usercache/jenkins/appcache/application_1733268377251_0006/container_1733268377251_0006_01_000002/container_tokens] 2024-12-03T23:29:49,312 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_2/usercache/jenkins/appcache/application_1733268377251_0006/container_1733268377251_0006_01_000002/sysfs] 2024-12-03T23:29:51,563 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_1/usercache/jenkins/appcache/application_1733268377251_0006/container_1733268377251_0006_01_000004/launch_container.sh] 2024-12-03T23:29:51,563 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_1/usercache/jenkins/appcache/application_1733268377251_0006/container_1733268377251_0006_01_000004/container_tokens] 2024-12-03T23:29:51,563 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_1/usercache/jenkins/appcache/application_1733268377251_0006/container_1733268377251_0006_01_000004/sysfs] 2024-12-03T23:29:52,268 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_2/usercache/jenkins/appcache/application_1733268377251_0006/container_1733268377251_0006_01_000005/launch_container.sh] 2024-12-03T23:29:52,268 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_2/usercache/jenkins/appcache/application_1733268377251_0006/container_1733268377251_0006_01_000005/container_tokens] 2024-12-03T23:29:52,268 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_2/usercache/jenkins/appcache/application_1733268377251_0006/container_1733268377251_0006_01_000005/sysfs] 2024-12-03T23:29:52,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742172_1348 (size=29746) 2024-12-03T23:29:52,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742172_1348 (size=29746) 2024-12-03T23:29:52,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742172_1348 (size=29746) 2024-12-03T23:29:52,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742173_1349 (size=463) 2024-12-03T23:29:52,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742173_1349 (size=463) 2024-12-03T23:29:52,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742173_1349 (size=463) 2024-12-03T23:29:52,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742174_1350 (size=29746) 2024-12-03T23:29:52,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742174_1350 (size=29746) 2024-12-03T23:29:52,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742174_1350 (size=29746) 
2024-12-03T23:29:52,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742175_1351 (size=349741) 2024-12-03T23:29:52,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742175_1351 (size=349741) 2024-12-03T23:29:52,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742175_1351 (size=349741) 2024-12-03T23:29:52,605 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0006_000001 (auth:SIMPLE) from 127.0.0.1:43176 2024-12-03T23:29:52,613 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0006_000001 (auth:SIMPLE) from 127.0.0.1:43192 2024-12-03T23:29:54,261 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-03T23:29:54,261 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-12-03T23:29:54,317 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testConsecutiveExports 2024-12-03T23:29:54,317 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-03T23:29:54,318 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-03T23:29:54,318 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-919726820_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/snaptb0-testConsecutiveExports at hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-03T23:29:54,363 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-03T23:29:54,363 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-03T23:29:54,363 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in org.apache.hadoop.fs.LocalFileSystem@3a8441fb in root file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/local-export-1733268551759/.hbase-snapshot/snaptb0-testConsecutiveExports at file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/local-export-1733268551759/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-03T23:29:54,364 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/local-export-1733268551759/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-03T23:29:54,364 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): 
file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/local-export-1733268551759/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-03T23:29:54,385 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testConsecutiveExports 2024-12-03T23:29:54,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure2.ProcedureExecutor(1139): Stored pid=125, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testConsecutiveExports 2024-12-03T23:29:54,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=125 2024-12-03T23:29:54,390 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733268594390"}]},"ts":"1733268594390"} 2024-12-03T23:29:54,393 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=DISABLING in hbase:meta 2024-12-03T23:29:54,393 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testConsecutiveExports to state=DISABLING 2024-12-03T23:29:54,394 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=126, ppid=125, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testConsecutiveExports}] 2024-12-03T23:29:54,396 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=127, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=b6ea905c40d950a4163b3b67c8be765a, UNASSIGN}, {pid=128, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=bbd2a25fcf245a74ce51ef1e4f5654f9, UNASSIGN}] 2024-12-03T23:29:54,398 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=127, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=b6ea905c40d950a4163b3b67c8be765a, UNASSIGN 2024-12-03T23:29:54,398 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=128, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=bbd2a25fcf245a74ce51ef1e4f5654f9, UNASSIGN 2024-12-03T23:29:54,399 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=127 updating hbase:meta row=b6ea905c40d950a4163b3b67c8be765a, regionState=CLOSING, regionLocation=ed8014902436,41121,1733268370525 2024-12-03T23:29:54,399 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=128 updating hbase:meta row=bbd2a25fcf245a74ce51ef1e4f5654f9, regionState=CLOSING, regionLocation=ed8014902436,39085,1733268370401 2024-12-03T23:29:54,401 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=127, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=b6ea905c40d950a4163b3b67c8be765a, UNASSIGN because future has completed 2024-12-03T23:29:54,402 DEBUG 
[PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-03T23:29:54,402 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=129, ppid=127, state=RUNNABLE, hasLock=false; CloseRegionProcedure b6ea905c40d950a4163b3b67c8be765a, server=ed8014902436,41121,1733268370525}] 2024-12-03T23:29:54,404 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=128, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=bbd2a25fcf245a74ce51ef1e4f5654f9, UNASSIGN because future has completed 2024-12-03T23:29:54,405 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-03T23:29:54,405 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=130, ppid=128, state=RUNNABLE, hasLock=false; CloseRegionProcedure bbd2a25fcf245a74ce51ef1e4f5654f9, server=ed8014902436,39085,1733268370401}] 2024-12-03T23:29:54,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=125 2024-12-03T23:29:54,556 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(122): Close b6ea905c40d950a4163b3b67c8be765a 2024-12-03T23:29:54,556 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-03T23:29:54,556 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1722): Closing b6ea905c40d950a4163b3b67c8be765a, disabling compactions & flushes 2024-12-03T23:29:54,556 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,,1733268549885.b6ea905c40d950a4163b3b67c8be765a. 2024-12-03T23:29:54,556 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,,1733268549885.b6ea905c40d950a4163b3b67c8be765a. 2024-12-03T23:29:54,556 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,,1733268549885.b6ea905c40d950a4163b3b67c8be765a. after waiting 0 ms 2024-12-03T23:29:54,556 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,,1733268549885.b6ea905c40d950a4163b3b67c8be765a. 
2024-12-03T23:29:54,559 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] handler.UnassignRegionHandler(122): Close bbd2a25fcf245a74ce51ef1e4f5654f9 2024-12-03T23:29:54,559 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-03T23:29:54,559 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1722): Closing bbd2a25fcf245a74ce51ef1e4f5654f9, disabling compactions & flushes 2024-12-03T23:29:54,559 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,1,1733268549885.bbd2a25fcf245a74ce51ef1e4f5654f9. 2024-12-03T23:29:54,559 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,1,1733268549885.bbd2a25fcf245a74ce51ef1e4f5654f9. 2024-12-03T23:29:54,559 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,1,1733268549885.bbd2a25fcf245a74ce51ef1e4f5654f9. after waiting 0 ms 2024-12-03T23:29:54,560 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,1,1733268549885.bbd2a25fcf245a74ce51ef1e4f5654f9. 2024-12-03T23:29:54,571 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testConsecutiveExports/b6ea905c40d950a4163b3b67c8be765a/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-03T23:29:54,572 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-03T23:29:54,572 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,,1733268549885.b6ea905c40d950a4163b3b67c8be765a. 
2024-12-03T23:29:54,572 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1676): Region close journal for b6ea905c40d950a4163b3b67c8be765a: Waiting for close lock at 1733268594556Running coprocessor pre-close hooks at 1733268594556Disabling compacts and flushes for region at 1733268594556Disabling writes for close at 1733268594556Writing region close event to WAL at 1733268594557 (+1 ms)Running coprocessor post-close hooks at 1733268594572 (+15 ms)Closed at 1733268594572 2024-12-03T23:29:54,574 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(157): Closed b6ea905c40d950a4163b3b67c8be765a 2024-12-03T23:29:54,575 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=127 updating hbase:meta row=b6ea905c40d950a4163b3b67c8be765a, regionState=CLOSED 2024-12-03T23:29:54,579 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=129, ppid=127, state=RUNNABLE, hasLock=false; CloseRegionProcedure b6ea905c40d950a4163b3b67c8be765a, server=ed8014902436,41121,1733268370525 because future has completed 2024-12-03T23:29:54,584 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=129, resume processing ppid=127 2024-12-03T23:29:54,586 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=127, ppid=126, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=b6ea905c40d950a4163b3b67c8be765a, UNASSIGN in 188 msec 2024-12-03T23:29:54,586 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=129, ppid=127, state=SUCCESS, hasLock=false; CloseRegionProcedure b6ea905c40d950a4163b3b67c8be765a, server=ed8014902436,41121,1733268370525 in 179 msec 2024-12-03T23:29:54,594 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testConsecutiveExports/bbd2a25fcf245a74ce51ef1e4f5654f9/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-03T23:29:54,594 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-03T23:29:54,594 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,1,1733268549885.bbd2a25fcf245a74ce51ef1e4f5654f9. 
2024-12-03T23:29:54,594 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1676): Region close journal for bbd2a25fcf245a74ce51ef1e4f5654f9: Waiting for close lock at 1733268594559Running coprocessor pre-close hooks at 1733268594559Disabling compacts and flushes for region at 1733268594559Disabling writes for close at 1733268594560 (+1 ms)Writing region close event to WAL at 1733268594570 (+10 ms)Running coprocessor post-close hooks at 1733268594594 (+24 ms)Closed at 1733268594594 2024-12-03T23:29:54,597 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] handler.UnassignRegionHandler(157): Closed bbd2a25fcf245a74ce51ef1e4f5654f9 2024-12-03T23:29:54,597 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=128 updating hbase:meta row=bbd2a25fcf245a74ce51ef1e4f5654f9, regionState=CLOSED 2024-12-03T23:29:54,601 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=130, ppid=128, state=RUNNABLE, hasLock=false; CloseRegionProcedure bbd2a25fcf245a74ce51ef1e4f5654f9, server=ed8014902436,39085,1733268370401 because future has completed 2024-12-03T23:29:54,608 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=130, resume processing ppid=128 2024-12-03T23:29:54,608 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=130, ppid=128, state=SUCCESS, hasLock=false; CloseRegionProcedure bbd2a25fcf245a74ce51ef1e4f5654f9, server=ed8014902436,39085,1733268370401 in 198 msec 2024-12-03T23:29:54,612 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=128, resume processing ppid=126 2024-12-03T23:29:54,612 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=128, ppid=126, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=bbd2a25fcf245a74ce51ef1e4f5654f9, UNASSIGN in 213 msec 2024-12-03T23:29:54,615 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=126, resume processing ppid=125 2024-12-03T23:29:54,615 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=126, ppid=125, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testConsecutiveExports in 219 msec 2024-12-03T23:29:54,617 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733268594616"}]},"ts":"1733268594616"} 2024-12-03T23:29:54,619 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=DISABLED in hbase:meta 2024-12-03T23:29:54,619 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testConsecutiveExports to state=DISABLED 2024-12-03T23:29:54,622 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=125, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testConsecutiveExports in 235 msec 2024-12-03T23:29:54,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=125 2024-12-03T23:29:54,713 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testConsecutiveExports completed 2024-12-03T23:29:54,713 INFO 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testConsecutiveExports 2024-12-03T23:29:54,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure2.ProcedureExecutor(1139): Stored pid=131, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-03T23:29:54,716 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=131, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-03T23:29:54,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testConsecutiveExports 2024-12-03T23:29:54,719 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=131, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-03T23:29:54,722 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37351 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testConsecutiveExports 2024-12-03T23:29:54,732 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testConsecutiveExports/b6ea905c40d950a4163b3b67c8be765a 2024-12-03T23:29:54,736 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testConsecutiveExports/b6ea905c40d950a4163b3b67c8be765a/cf, FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testConsecutiveExports/b6ea905c40d950a4163b3b67c8be765a/recovered.edits] 2024-12-03T23:29:54,743 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testConsecutiveExports/b6ea905c40d950a4163b3b67c8be765a/cf/9b391e1efe56481596f33ec0141248aa to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testtb-testConsecutiveExports/b6ea905c40d950a4163b3b67c8be765a/cf/9b391e1efe56481596f33ec0141248aa 2024-12-03T23:29:54,745 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testConsecutiveExports/bbd2a25fcf245a74ce51ef1e4f5654f9 2024-12-03T23:29:54,752 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testConsecutiveExports/bbd2a25fcf245a74ce51ef1e4f5654f9/cf, FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testConsecutiveExports/bbd2a25fcf245a74ce51ef1e4f5654f9/recovered.edits] 2024-12-03T23:29:54,847 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testConsecutiveExports/b6ea905c40d950a4163b3b67c8be765a/recovered.edits/9.seqid to 
hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testtb-testConsecutiveExports/b6ea905c40d950a4163b3b67c8be765a/recovered.edits/9.seqid 2024-12-03T23:29:54,848 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testConsecutiveExports/b6ea905c40d950a4163b3b67c8be765a 2024-12-03T23:29:54,853 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testConsecutiveExports/bbd2a25fcf245a74ce51ef1e4f5654f9/cf/cf9df996ba0244b5b938e9234eb0fc4a to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testtb-testConsecutiveExports/bbd2a25fcf245a74ce51ef1e4f5654f9/cf/cf9df996ba0244b5b938e9234eb0fc4a 2024-12-03T23:29:54,864 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testConsecutiveExports/bbd2a25fcf245a74ce51ef1e4f5654f9/recovered.edits/9.seqid to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testtb-testConsecutiveExports/bbd2a25fcf245a74ce51ef1e4f5654f9/recovered.edits/9.seqid 2024-12-03T23:29:54,865 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testConsecutiveExports/bbd2a25fcf245a74ce51ef1e4f5654f9 2024-12-03T23:29:54,865 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testConsecutiveExports regions 2024-12-03T23:29:54,865 DEBUG [PEWorker-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac 2024-12-03T23:29:54,867 DEBUG [PEWorker-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf] 2024-12-03T23:29:54,870 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/c4ca4238a0b923820dcc509a6f75849b20241203165485d9c2104feeaa67645f5dfd7e33_bbd2a25fcf245a74ce51ef1e4f5654f9 to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/c4ca4238a0b923820dcc509a6f75849b20241203165485d9c2104feeaa67645f5dfd7e33_bbd2a25fcf245a74ce51ef1e4f5654f9 2024-12-03T23:29:54,875 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/d41d8cd98f00b204e9800998ecf8427e20241203fa95a7ec89724ae2be61262361f3aa4c_b6ea905c40d950a4163b3b67c8be765a to 
hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/d41d8cd98f00b204e9800998ecf8427e20241203fa95a7ec89724ae2be61262361f3aa4c_b6ea905c40d950a4163b3b67c8be765a 2024-12-03T23:29:54,876 DEBUG [PEWorker-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac 2024-12-03T23:29:54,880 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=131, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-03T23:29:54,883 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testConsecutiveExports from hbase:meta 2024-12-03T23:29:55,073 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-03T23:29:55,073 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-03T23:29:55,074 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-03T23:29:55,074 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-03T23:29:55,075 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-12-03T23:29:55,076 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-12-03T23:29:55,076 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testConsecutiveExports' descriptor. 2024-12-03T23:29:55,076 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-12-03T23:29:55,076 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-12-03T23:29:55,079 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=131, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-03T23:29:55,079 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testConsecutiveExports' from region states. 
2024-12-03T23:29:55,079 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports,,1733268549885.b6ea905c40d950a4163b3b67c8be765a.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733268595079"}]},"ts":"9223372036854775807"} 2024-12-03T23:29:55,079 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports,1,1733268549885.bbd2a25fcf245a74ce51ef1e4f5654f9.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733268595079"}]},"ts":"9223372036854775807"} 2024-12-03T23:29:55,082 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-03T23:29:55,082 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => b6ea905c40d950a4163b3b67c8be765a, NAME => 'testtb-testConsecutiveExports,,1733268549885.b6ea905c40d950a4163b3b67c8be765a.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => bbd2a25fcf245a74ce51ef1e4f5654f9, NAME => 'testtb-testConsecutiveExports,1,1733268549885.bbd2a25fcf245a74ce51ef1e4f5654f9.', STARTKEY => '1', ENDKEY => ''}] 2024-12-03T23:29:55,082 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testConsecutiveExports' as deleted. 2024-12-03T23:29:55,082 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733268595082"}]},"ts":"9223372036854775807"} 2024-12-03T23:29:55,088 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testConsecutiveExports state from META 2024-12-03T23:29:55,089 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=131, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-03T23:29:55,090 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=131, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testConsecutiveExports in 375 msec 2024-12-03T23:29:55,176 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-03T23:29:55,176 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-03T23:29:55,176 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-03T23:29:55,176 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:29:55,176 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:29:55,176 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:29:55,176 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-03T23:29:55,176 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:29:55,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=131 2024-12-03T23:29:55,178 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testConsecutiveExports 2024-12-03T23:29:55,178 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testConsecutiveExports completed 2024-12-03T23:29:55,184 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testConsecutiveExports" type: DISABLED 2024-12-03T23:29:55,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testConsecutiveExports 2024-12-03T23:29:55,188 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testConsecutiveExports" type: DISABLED 2024-12-03T23:29:55,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testConsecutiveExports 2024-12-03T23:29:55,213 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testConsecutiveExports Thread=812 (was 806) Potentially hanging thread: MoveIntermediateToDone Thread #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool.commonPool-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: Thread-5490 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: process reaper (pid 145724) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-919726820_22 at /127.0.0.1:55966 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MoveIntermediateToDone Thread #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-919726820_22 at /127.0.0.1:57174 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (790252428) connection to localhost/127.0.0.1:39511 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HFileArchiver-15 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver 
for client DFSClient_NONMAPREDUCE_-919726820_22 at /127.0.0.1:58760 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-767975473_1 at /127.0.0.1:58740 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39511 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-767975473_1 at /127.0.0.1:55932 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MoveIntermediateToDone Thread #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=803 (was 813), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=797 (was 728) - SystemLoadAverage LEAK? 
-, ProcessCount=20 (was 20), AvailableMemoryMB=2385 (was 2868) 2024-12-03T23:29:55,213 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=812 is superior to 500 2024-12-03T23:29:55,233 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithMergeRegion Thread=812, OpenFileDescriptor=803, MaxFileDescriptor=1048576, SystemLoadAverage=797, ProcessCount=20, AvailableMemoryMB=2382 2024-12-03T23:29:55,234 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=812 is superior to 500 2024-12-03T23:29:55,235 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-03T23:29:55,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure2.ProcedureExecutor(1139): Stored pid=132, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-03T23:29:55,237 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_PRE_OPERATION 2024-12-03T23:29:55,237 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithMergeRegion" procId is: 132 2024-12-03T23:29:55,238 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-03T23:29:55,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=132 2024-12-03T23:29:55,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742176_1352 (size=458) 2024-12-03T23:29:55,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742176_1352 (size=458) 2024-12-03T23:29:55,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742176_1352 (size=458) 2024-12-03T23:29:55,263 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => cb8fd3807df9534ef4d2d133dcd44fc7, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1733268595235.cb8fd3807df9534ef4d2d133dcd44fc7.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', 
INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b 2024-12-03T23:29:55,263 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 44fc121d055419da5089ab46382aee4c, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1733268595235.44fc121d055419da5089ab46382aee4c.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b 2024-12-03T23:29:55,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742177_1353 (size=83) 2024-12-03T23:29:55,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742177_1353 (size=83) 2024-12-03T23:29:55,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742177_1353 (size=83) 2024-12-03T23:29:55,278 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,,1733268595235.44fc121d055419da5089ab46382aee4c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T23:29:55,279 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1722): Closing 44fc121d055419da5089ab46382aee4c, disabling compactions & flushes 2024-12-03T23:29:55,279 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,,1733268595235.44fc121d055419da5089ab46382aee4c. 2024-12-03T23:29:55,279 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,,1733268595235.44fc121d055419da5089ab46382aee4c. 2024-12-03T23:29:55,279 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,,1733268595235.44fc121d055419da5089ab46382aee4c. after waiting 0 ms 2024-12-03T23:29:55,279 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,,1733268595235.44fc121d055419da5089ab46382aee4c. 
2024-12-03T23:29:55,279 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,,1733268595235.44fc121d055419da5089ab46382aee4c. 2024-12-03T23:29:55,279 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1676): Region close journal for 44fc121d055419da5089ab46382aee4c: Waiting for close lock at 1733268595279Disabling compacts and flushes for region at 1733268595279Disabling writes for close at 1733268595279Writing region close event to WAL at 1733268595279Closed at 1733268595279 2024-12-03T23:29:55,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742178_1354 (size=83) 2024-12-03T23:29:55,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742178_1354 (size=83) 2024-12-03T23:29:55,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742178_1354 (size=83) 2024-12-03T23:29:55,286 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1733268595235.cb8fd3807df9534ef4d2d133dcd44fc7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T23:29:55,286 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1722): Closing cb8fd3807df9534ef4d2d133dcd44fc7, disabling compactions & flushes 2024-12-03T23:29:55,287 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,1,1733268595235.cb8fd3807df9534ef4d2d133dcd44fc7. 2024-12-03T23:29:55,287 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1733268595235.cb8fd3807df9534ef4d2d133dcd44fc7. 2024-12-03T23:29:55,287 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1733268595235.cb8fd3807df9534ef4d2d133dcd44fc7. after waiting 0 ms 2024-12-03T23:29:55,287 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1733268595235.cb8fd3807df9534ef4d2d133dcd44fc7. 2024-12-03T23:29:55,287 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1733268595235.cb8fd3807df9534ef4d2d133dcd44fc7. 
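The create request logged at 23:29:55,235 carries the full table descriptor: a single MOB-enabled column family 'cf' (IS_MOB => 'true', MOB_THRESHOLD => '0', VERSIONS => '1') and two regions split at key '1', which the RegionOpenAndInit workers above have just laid out on the filesystem and closed again. A hedged sketch of how an equivalent descriptor could be declared through the client API (table name, family name and split key are taken from this log; the class and method names are assumptions):

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MobTableSketch {
      /** Builds and creates a table shaped like the one in this log; admin comes from an open Connection. */
      public static void createMobTable(Admin admin) throws IOException {
        TableDescriptor desc = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                .setMobEnabled(true)   // IS_MOB => 'true'
                .setMobThreshold(0L)   // MOB_THRESHOLD => '0': every cell value is written to a MOB file
                .setMaxVersions(1)     // VERSIONS => '1'
                .build())
            .build();
        // one split point produces the two regions ('' to '1' and '1' to '') initialized above
        admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
      }
    }
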
2024-12-03T23:29:55,287 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1676): Region close journal for cb8fd3807df9534ef4d2d133dcd44fc7: Waiting for close lock at 1733268595286Disabling compacts and flushes for region at 1733268595286Disabling writes for close at 1733268595287 (+1 ms)Writing region close event to WAL at 1733268595287Closed at 1733268595287 2024-12-03T23:29:55,288 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_ADD_TO_META 2024-12-03T23:29:55,288 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion,,1733268595235.44fc121d055419da5089ab46382aee4c.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1733268595288"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733268595288"}]},"ts":"1733268595288"} 2024-12-03T23:29:55,288 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion,1,1733268595235.cb8fd3807df9534ef4d2d133dcd44fc7.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1733268595288"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733268595288"}]},"ts":"1733268595288"} 2024-12-03T23:29:55,290 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-03T23:29:55,291 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-03T23:29:55,291 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733268595291"}]},"ts":"1733268595291"} 2024-12-03T23:29:55,293 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=ENABLING in hbase:meta 2024-12-03T23:29:55,293 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {ed8014902436=0} racks are {/default-rack=0} 2024-12-03T23:29:55,294 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-03T23:29:55,294 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-03T23:29:55,294 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-03T23:29:55,294 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-03T23:29:55,294 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-03T23:29:55,294 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-03T23:29:55,294 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-03T23:29:55,294 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-03T23:29:55,294 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-03T23:29:55,294 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-03T23:29:55,294 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): 
Initialized subprocedures=[{pid=133, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=44fc121d055419da5089ab46382aee4c, ASSIGN}, {pid=134, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=cb8fd3807df9534ef4d2d133dcd44fc7, ASSIGN}] 2024-12-03T23:29:55,296 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=134, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=cb8fd3807df9534ef4d2d133dcd44fc7, ASSIGN 2024-12-03T23:29:55,296 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=133, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=44fc121d055419da5089ab46382aee4c, ASSIGN 2024-12-03T23:29:55,296 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=133, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=44fc121d055419da5089ab46382aee4c, ASSIGN; state=OFFLINE, location=ed8014902436,39085,1733268370401; forceNewPlan=false, retain=false 2024-12-03T23:29:55,296 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=134, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=cb8fd3807df9534ef4d2d133dcd44fc7, ASSIGN; state=OFFLINE, location=ed8014902436,37351,1733268370600; forceNewPlan=false, retain=false 2024-12-03T23:29:55,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=132 2024-12-03T23:29:55,447 INFO [ed8014902436:43753 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
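At this point the CreateTableProcedure has spawned one TransitRegionStateProcedure per region (pid=133 and pid=134) and the balancer has picked a region server for each. The client's createTable call simply blocks until these procedures finish; once it returns, the resulting placement can be read back through a RegionLocator. A small illustrative sketch, assuming an already-open Connection (only the table name comes from the log):

    import java.io.IOException;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class RegionPlacementSketch {
      /** Prints the encoded region name and hosting server for each region of the new table. */
      public static void printPlacements(Connection conn) throws IOException {
        TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
        try (RegionLocator locator = conn.getRegionLocator(table)) {
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            // in this run: 44fc121d... -> ed8014902436,39085,... and cb8fd380... -> ed8014902436,37351,...
            System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
          }
        }
      }
    }
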
2024-12-03T23:29:55,447 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=134 updating hbase:meta row=cb8fd3807df9534ef4d2d133dcd44fc7, regionState=OPENING, regionLocation=ed8014902436,37351,1733268370600 2024-12-03T23:29:55,447 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=133 updating hbase:meta row=44fc121d055419da5089ab46382aee4c, regionState=OPENING, regionLocation=ed8014902436,39085,1733268370401 2024-12-03T23:29:55,449 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=134, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=cb8fd3807df9534ef4d2d133dcd44fc7, ASSIGN because future has completed 2024-12-03T23:29:55,449 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=135, ppid=134, state=RUNNABLE, hasLock=false; OpenRegionProcedure cb8fd3807df9534ef4d2d133dcd44fc7, server=ed8014902436,37351,1733268370600}] 2024-12-03T23:29:55,450 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=133, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=44fc121d055419da5089ab46382aee4c, ASSIGN because future has completed 2024-12-03T23:29:55,450 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=136, ppid=133, state=RUNNABLE, hasLock=false; OpenRegionProcedure 44fc121d055419da5089ab46382aee4c, server=ed8014902436,39085,1733268370401}] 2024-12-03T23:29:55,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=132 2024-12-03T23:29:55,605 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion,,1733268595235.44fc121d055419da5089ab46382aee4c. 2024-12-03T23:29:55,605 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(7752): Opening region: {ENCODED => 44fc121d055419da5089ab46382aee4c, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1733268595235.44fc121d055419da5089ab46382aee4c.', STARTKEY => '', ENDKEY => '1'} 2024-12-03T23:29:55,605 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,,1733268595235.44fc121d055419da5089ab46382aee4c. service=AccessControlService 2024-12-03T23:29:55,605 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-03T23:29:55,606 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion 44fc121d055419da5089ab46382aee4c 2024-12-03T23:29:55,606 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,,1733268595235.44fc121d055419da5089ab46382aee4c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T23:29:55,606 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(7794): checking encryption for 44fc121d055419da5089ab46382aee4c 2024-12-03T23:29:55,606 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(7797): checking classloading for 44fc121d055419da5089ab46382aee4c 2024-12-03T23:29:55,606 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion,1,1733268595235.cb8fd3807df9534ef4d2d133dcd44fc7. 2024-12-03T23:29:55,606 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7752): Opening region: {ENCODED => cb8fd3807df9534ef4d2d133dcd44fc7, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1733268595235.cb8fd3807df9534ef4d2d133dcd44fc7.', STARTKEY => '1', ENDKEY => ''} 2024-12-03T23:29:55,606 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,1,1733268595235.cb8fd3807df9534ef4d2d133dcd44fc7. service=AccessControlService 2024-12-03T23:29:55,607 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-03T23:29:55,607 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion cb8fd3807df9534ef4d2d133dcd44fc7 2024-12-03T23:29:55,607 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1733268595235.cb8fd3807df9534ef4d2d133dcd44fc7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T23:29:55,607 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7794): checking encryption for cb8fd3807df9534ef4d2d133dcd44fc7 2024-12-03T23:29:55,607 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7797): checking classloading for cb8fd3807df9534ef4d2d133dcd44fc7 2024-12-03T23:29:55,607 INFO [StoreOpener-44fc121d055419da5089ab46382aee4c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 44fc121d055419da5089ab46382aee4c 2024-12-03T23:29:55,608 INFO [StoreOpener-cb8fd3807df9534ef4d2d133dcd44fc7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region cb8fd3807df9534ef4d2d133dcd44fc7 2024-12-03T23:29:55,608 INFO [StoreOpener-44fc121d055419da5089ab46382aee4c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 44fc121d055419da5089ab46382aee4c columnFamilyName cf 2024-12-03T23:29:55,609 INFO [StoreOpener-cb8fd3807df9534ef4d2d133dcd44fc7-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cb8fd3807df9534ef4d2d133dcd44fc7 columnFamilyName cf 2024-12-03T23:29:55,609 DEBUG [StoreOpener-44fc121d055419da5089ab46382aee4c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:29:55,610 DEBUG [StoreOpener-cb8fd3807df9534ef4d2d133dcd44fc7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:29:55,610 INFO [StoreOpener-44fc121d055419da5089ab46382aee4c-1 {}] regionserver.HStore(327): Store=44fc121d055419da5089ab46382aee4c/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T23:29:55,610 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1038): replaying wal for 44fc121d055419da5089ab46382aee4c 2024-12-03T23:29:55,611 INFO [StoreOpener-cb8fd3807df9534ef4d2d133dcd44fc7-1 {}] regionserver.HStore(327): Store=cb8fd3807df9534ef4d2d133dcd44fc7/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T23:29:55,611 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1038): replaying wal for cb8fd3807df9534ef4d2d133dcd44fc7 2024-12-03T23:29:55,611 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion/44fc121d055419da5089ab46382aee4c 2024-12-03T23:29:55,611 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion/44fc121d055419da5089ab46382aee4c 2024-12-03T23:29:55,612 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion/cb8fd3807df9534ef4d2d133dcd44fc7 2024-12-03T23:29:55,612 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1048): stopping wal replay for 44fc121d055419da5089ab46382aee4c 2024-12-03T23:29:55,612 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1060): Cleaning up temporary data for 44fc121d055419da5089ab46382aee4c 2024-12-03T23:29:55,612 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion/cb8fd3807df9534ef4d2d133dcd44fc7 2024-12-03T23:29:55,612 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1048): stopping wal replay for cb8fd3807df9534ef4d2d133dcd44fc7 2024-12-03T23:29:55,612 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1060): Cleaning up temporary data for cb8fd3807df9534ef4d2d133dcd44fc7 
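The StoreOpener threads above resolve the 'hbase.store.file-tracker.impl' => 'DEFAULT' entry carried in the table descriptor's METADATA to DefaultStoreFileTracker before opening each store. The same key can be set when the descriptor is built; a sketch under that assumption (the helper class and method are hypothetical, the key and value are the ones shown in this log, and column families would still be added as in the earlier sketch before the descriptor is used to create a table):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class TrackerSketch {
      /** Returns a descriptor that pins the store file tracker, as the METADATA entry in this log does. */
      public static TableDescriptor withDefaultTracker(TableName name) {
        return TableDescriptorBuilder.newBuilder(name)
            // same key/value pair logged in the table descriptor; DEFAULT resolves to DefaultStoreFileTracker
            .setValue("hbase.store.file-tracker.impl", "DEFAULT")
            .build();
      }
    }
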
2024-12-03T23:29:55,613 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1093): writing seq id for 44fc121d055419da5089ab46382aee4c 2024-12-03T23:29:55,613 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1093): writing seq id for cb8fd3807df9534ef4d2d133dcd44fc7 2024-12-03T23:29:55,615 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion/44fc121d055419da5089ab46382aee4c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T23:29:55,615 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1114): Opened 44fc121d055419da5089ab46382aee4c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70338073, jitterRate=0.048118963837623596}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-03T23:29:55,615 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 44fc121d055419da5089ab46382aee4c 2024-12-03T23:29:55,616 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1006): Region open journal for 44fc121d055419da5089ab46382aee4c: Running coprocessor pre-open hook at 1733268595606Writing region info on filesystem at 1733268595606Initializing all the Stores at 1733268595606Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733268595607 (+1 ms)Cleaning up temporary data from old regions at 1733268595612 (+5 ms)Running coprocessor post-open hooks at 1733268595615 (+3 ms)Region opened successfully at 1733268595616 (+1 ms) 2024-12-03T23:29:55,621 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion/cb8fd3807df9534ef4d2d133dcd44fc7/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T23:29:55,621 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion,,1733268595235.44fc121d055419da5089ab46382aee4c., pid=136, masterSystemTime=1733268595602 2024-12-03T23:29:55,622 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1114): Opened cb8fd3807df9534ef4d2d133dcd44fc7; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68659605, jitterRate=0.02310784161090851}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-03T23:29:55,622 DEBUG 
[RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1122): Running coprocessor post-open hooks for cb8fd3807df9534ef4d2d133dcd44fc7 2024-12-03T23:29:55,622 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1006): Region open journal for cb8fd3807df9534ef4d2d133dcd44fc7: Running coprocessor pre-open hook at 1733268595607Writing region info on filesystem at 1733268595607Initializing all the Stores at 1733268595608 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733268595608Cleaning up temporary data from old regions at 1733268595612 (+4 ms)Running coprocessor post-open hooks at 1733268595622 (+10 ms)Region opened successfully at 1733268595622 2024-12-03T23:29:55,627 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion,1,1733268595235.cb8fd3807df9534ef4d2d133dcd44fc7., pid=135, masterSystemTime=1733268595601 2024-12-03T23:29:55,628 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,,1733268595235.44fc121d055419da5089ab46382aee4c. 2024-12-03T23:29:55,628 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion,,1733268595235.44fc121d055419da5089ab46382aee4c. 2024-12-03T23:29:55,629 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=133 updating hbase:meta row=44fc121d055419da5089ab46382aee4c, regionState=OPEN, openSeqNum=2, regionLocation=ed8014902436,39085,1733268370401 2024-12-03T23:29:55,631 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=136, ppid=133, state=RUNNABLE, hasLock=false; OpenRegionProcedure 44fc121d055419da5089ab46382aee4c, server=ed8014902436,39085,1733268370401 because future has completed 2024-12-03T23:29:55,632 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,1,1733268595235.cb8fd3807df9534ef4d2d133dcd44fc7. 2024-12-03T23:29:55,632 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion,1,1733268595235.cb8fd3807df9534ef4d2d133dcd44fc7. 
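The open journal above records the column family the regions were created with (cf: one version, ROW bloom filter, MOB enabled with threshold 0, 64 KB blocks, no compression). The log does not show the client call that created the table; a minimal sketch of creating an equivalent MOB-enabled table through the HBase Admin API, assuming a single split key of "1" to produce the two regions seen here, could look like:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Column family mirroring the attributes printed in the region open journal.
      ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
          .setMaxVersions(1)
          .setBloomFilterType(BloomType.ROW)
          .setMobEnabled(true)
          .setMobThreshold(0L)
          .setBlocksize(64 * 1024)
          .build();
      TableDescriptor table = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"))
          .setColumnFamily(cf)
          .build();
      // One split key ("1") yields the two regions seen in the log, with start keys "" and "1".
      admin.createTable(table, new byte[][] { Bytes.toBytes("1") });
    }
  }
}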
2024-12-03T23:29:55,633 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=134 updating hbase:meta row=cb8fd3807df9534ef4d2d133dcd44fc7, regionState=OPEN, openSeqNum=2, regionLocation=ed8014902436,37351,1733268370600 2024-12-03T23:29:55,635 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=135, ppid=134, state=RUNNABLE, hasLock=false; OpenRegionProcedure cb8fd3807df9534ef4d2d133dcd44fc7, server=ed8014902436,37351,1733268370600 because future has completed 2024-12-03T23:29:55,635 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=136, resume processing ppid=133 2024-12-03T23:29:55,638 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=136, ppid=133, state=SUCCESS, hasLock=false; OpenRegionProcedure 44fc121d055419da5089ab46382aee4c, server=ed8014902436,39085,1733268370401 in 183 msec 2024-12-03T23:29:55,638 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=135, resume processing ppid=134 2024-12-03T23:29:55,638 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=135, ppid=134, state=SUCCESS, hasLock=false; OpenRegionProcedure cb8fd3807df9534ef4d2d133dcd44fc7, server=ed8014902436,37351,1733268370600 in 186 msec 2024-12-03T23:29:55,639 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=133, ppid=132, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=44fc121d055419da5089ab46382aee4c, ASSIGN in 342 msec 2024-12-03T23:29:55,640 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=134, resume processing ppid=132 2024-12-03T23:29:55,640 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=134, ppid=132, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=cb8fd3807df9534ef4d2d133dcd44fc7, ASSIGN in 344 msec 2024-12-03T23:29:55,641 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-03T23:29:55,641 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733268595641"}]},"ts":"1733268595641"} 2024-12-03T23:29:55,643 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=ENABLED in hbase:meta 2024-12-03T23:29:55,644 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_POST_OPERATION 2024-12-03T23:29:55,645 DEBUG [PEWorker-4 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithMergeRegion jenkins: RWXCA 2024-12-03T23:29:55,652 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37351 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-12-03T23:29:55,698 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Received 
ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:29:55,698 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:29:55,698 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:29:55,698 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:29:55,723 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-03T23:29:55,723 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-03T23:29:55,723 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-03T23:29:55,723 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-03T23:29:55,725 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=132, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 488 msec 2024-12-03T23:29:55,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=132 2024-12-03T23:29:55,869 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-12-03T23:29:55,869 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-03T23:29:55,871 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithMergeRegion 2024-12-03T23:29:55,871 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithMergeRegion,,1733268595235.44fc121d055419da5089ab46382aee4c. 
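Here PermissionStorage writes the "jenkins: RWXCA" entry into hbase:acl, and each server picks the change up through the /hbase/acl ZooKeeper watcher (the NodeChildrenChanged events above). In this run the grant happens inside CreateTableProcedure; purely for illustration, an equivalent client-side grant via AccessControlClient (not part of this test) might look like:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantTablePermissionsExample {
  public static void main(String[] args) throws Throwable {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      // Grant the full RWXCA set on the table; the master persists it in hbase:acl and
      // region servers refresh their permission cache via the /hbase/acl watcher.
      AccessControlClient.grant(conn,
          TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"),
          "jenkins", null, null,
          Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
          Permission.Action.CREATE, Permission.Action.ADMIN);
    }
  }
}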
2024-12-03T23:29:55,872 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-03T23:29:55,873 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-03T23:29:55,877 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-03T23:29:55,881 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-03T23:29:55,883 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-03T23:29:55,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733268595883 (current time:1733268595883). 2024-12-03T23:29:55,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-03T23:29:55,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion VERSION not specified, setting to 2 2024-12-03T23:29:55,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-03T23:29:55,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1ad50227, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:29:55,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.ClusterIdFetcher(90): Going to request ed8014902436,43753,-1 for getting cluster id 2024-12-03T23:29:55,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T23:29:55,885 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '365bf15c-9cbe-4838-a767-ac7274dcdbe4' 2024-12-03T23:29:55,885 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T23:29:55,885 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "365bf15c-9cbe-4838-a767-ac7274dcdbe4" 2024-12-03T23:29:55,885 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e824ee4, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:29:55,885 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [ed8014902436,43753,-1] 2024-12-03T23:29:55,886 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T23:29:55,886 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:29:55,886 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39540, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T23:29:55,887 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5a6a2dad, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:29:55,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T23:29:55,888 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ed8014902436,37351,1733268370600, seqNum=-1] 2024-12-03T23:29:55,888 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T23:29:55,889 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60158, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T23:29:55,890 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753. 
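The RPC handler above bootstraps a short-lived connection: it asks the connection registry for the cluster id and then for the hbase:meta region location before closing the connection again. A hedged sketch of reading the same two pieces of information from an ordinary client connection (output lines are illustrative, not taken from this log):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class ClusterBootstrapInfoExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin();
         RegionLocator meta = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
      ClusterMetrics metrics = admin.getClusterMetrics();
      System.out.println("cluster id: " + metrics.getClusterId());
      // Counterpart of the "fetched meta region location" lines: where hbase:meta is served.
      for (HRegionLocation loc : meta.getAllRegionLocations()) {
        System.out.println("hbase:meta region at " + loc.getServerName());
      }
    }
  }
}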
2024-12-03T23:29:55,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-03T23:29:55,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:29:55,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:29:55,890 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-03T23:29:55,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2195a7ad, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:29:55,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.ClusterIdFetcher(90): Going to request ed8014902436,43753,-1 for getting cluster id 2024-12-03T23:29:55,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T23:29:55,891 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '365bf15c-9cbe-4838-a767-ac7274dcdbe4' 2024-12-03T23:29:55,891 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T23:29:55,891 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "365bf15c-9cbe-4838-a767-ac7274dcdbe4" 2024-12-03T23:29:55,892 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1f2d4e14, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:29:55,892 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [ed8014902436,43753,-1] 2024-12-03T23:29:55,892 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T23:29:55,892 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:29:55,892 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39558, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T23:29:55,893 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@52dc61a6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:29:55,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T23:29:55,894 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ed8014902436,37351,1733268370600, seqNum=-1] 2024-12-03T23:29:55,894 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T23:29:55,895 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60168, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T23:29:55,896 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion', locateType=CURRENT is [region=hbase:acl,,1733268373530.94ece278d222af9bae70064026dc24ae., hostname=ed8014902436,37351,1733268370600, seqNum=2] 2024-12-03T23:29:55,898 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753. 
2024-12-03T23:29:55,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor256.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-03T23:29:55,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:29:55,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:29:55,898 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-03T23:29:55,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-12-03T23:29:55,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
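With the table ACL read back for the snapshot description, the master is about to store SnapshotProcedure pid=137 for a FLUSH-type snapshot. The request came from the test client; a minimal sketch of issuing the same kind of request through the Admin API (table and snapshot names copied from the log) could be:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeSnapshotExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // A FLUSH-type snapshot, as in the request logged by MasterRpcServices above.
      // The call blocks until the SnapshotProcedure on the master reports completion.
      admin.snapshot("emptySnaptb0-testExportFileSystemStateWithMergeRegion",
          TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"),
          SnapshotType.FLUSH);
    }
  }
}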
2024-12-03T23:29:55,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure2.ProcedureExecutor(1139): Stored pid=137, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-03T23:29:55,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 137 2024-12-03T23:29:55,900 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-03T23:29:55,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=137 2024-12-03T23:29:55,901 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-03T23:29:55,903 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-03T23:29:55,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742179_1355 (size=215) 2024-12-03T23:29:55,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742179_1355 (size=215) 2024-12-03T23:29:55,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742179_1355 (size=215) 2024-12-03T23:29:55,911 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-03T23:29:55,911 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=138, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 44fc121d055419da5089ab46382aee4c}, {pid=139, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure cb8fd3807df9534ef4d2d133dcd44fc7}] 2024-12-03T23:29:55,912 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=138, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 44fc121d055419da5089ab46382aee4c 2024-12-03T23:29:55,912 INFO [PEWorker-2 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=139, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure cb8fd3807df9534ef4d2d133dcd44fc7 2024-12-03T23:29:56,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=137 2024-12-03T23:29:56,066 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37351 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=139 2024-12-03T23:29:56,066 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39085 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=138 2024-12-03T23:29:56,066 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1733268595235.44fc121d055419da5089ab46382aee4c. 2024-12-03T23:29:56,066 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1733268595235.cb8fd3807df9534ef4d2d133dcd44fc7. 2024-12-03T23:29:56,066 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.HRegion(2603): Flush status journal for 44fc121d055419da5089ab46382aee4c: 2024-12-03T23:29:56,066 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.HRegion(2603): Flush status journal for cb8fd3807df9534ef4d2d133dcd44fc7: 2024-12-03T23:29:56,066 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,,1733268595235.44fc121d055419da5089ab46382aee4c. for emptySnaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-03T23:29:56,067 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,1,1733268595235.cb8fd3807df9534ef4d2d133dcd44fc7. for emptySnaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-03T23:29:56,067 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,,1733268595235.44fc121d055419da5089ab46382aee4c.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-03T23:29:56,067 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-03T23:29:56,067 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,1,1733268595235.cb8fd3807df9534ef4d2d133dcd44fc7.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-03T23:29:56,067 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-03T23:29:56,067 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-03T23:29:56,067 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-03T23:29:56,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742180_1356 (size=86) 2024-12-03T23:29:56,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742180_1356 (size=86) 2024-12-03T23:29:56,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742180_1356 (size=86) 2024-12-03T23:29:56,077 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1733268595235.cb8fd3807df9534ef4d2d133dcd44fc7. 2024-12-03T23:29:56,077 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=139 2024-12-03T23:29:56,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.HMaster(4169): Remote procedure done, pid=139 2024-12-03T23:29:56,077 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion on region cb8fd3807df9534ef4d2d133dcd44fc7 2024-12-03T23:29:56,077 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=139, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure cb8fd3807df9534ef4d2d133dcd44fc7 2024-12-03T23:29:56,079 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=139, ppid=137, state=SUCCESS, hasLock=false; SnapshotRegionProcedure cb8fd3807df9534ef4d2d133dcd44fc7 in 167 msec 2024-12-03T23:29:56,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742181_1357 (size=86) 2024-12-03T23:29:56,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742181_1357 (size=86) 2024-12-03T23:29:56,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742181_1357 (size=86) 2024-12-03T23:29:56,088 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1733268595235.44fc121d055419da5089ab46382aee4c. 
2024-12-03T23:29:56,088 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=138 2024-12-03T23:29:56,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.HMaster(4169): Remote procedure done, pid=138 2024-12-03T23:29:56,088 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion on region 44fc121d055419da5089ab46382aee4c 2024-12-03T23:29:56,089 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=138, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 44fc121d055419da5089ab46382aee4c 2024-12-03T23:29:56,091 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=138, resume processing ppid=137 2024-12-03T23:29:56,091 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-03T23:29:56,091 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=138, ppid=137, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 44fc121d055419da5089ab46382aee4c in 178 msec 2024-12-03T23:29:56,091 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-03T23:29:56,092 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-03T23:29:56,092 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-03T23:29:56,092 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:29:56,092 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-03T23:29:56,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742182_1358 (size=78) 2024-12-03T23:29:56,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742182_1358 (size=78) 2024-12-03T23:29:56,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742182_1358 (size=78) 2024-12-03T23:29:56,099 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-03T23:29:56,099 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-03T23:29:56,100 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-03T23:29:56,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742183_1359 (size=713) 2024-12-03T23:29:56,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742183_1359 (size=713) 2024-12-03T23:29:56,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742183_1359 (size=713) 2024-12-03T23:29:56,113 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-03T23:29:56,118 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-03T23:29:56,118 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithMergeRegion to 
hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-03T23:29:56,119 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-03T23:29:56,119 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 137 2024-12-03T23:29:56,121 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=137, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } in 220 msec 2024-12-03T23:29:56,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=137 2024-12-03T23:29:56,219 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-12-03T23:29:56,225 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39085 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithMergeRegion,,1733268595235.44fc121d055419da5089ab46382aee4c. with WAL disabled. Data may be lost in the event of a crash. 2024-12-03T23:29:56,226 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37351 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithMergeRegion,1,1733268595235.cb8fd3807df9534ef4d2d133dcd44fc7. with WAL disabled. Data may be lost in the event of a crash. 2024-12-03T23:29:56,227 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-03T23:29:56,230 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithMergeRegion 2024-12-03T23:29:56,230 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithMergeRegion,,1733268595235.44fc121d055419da5089ab46382aee4c. 
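The test then writes one row into each region with the WAL disabled, which is why the server warns that data may be lost in the event of a crash. A small sketch of such a write from the client side, with row and qualifier names assumed for illustration; the explicit flush is optional here because the following FLUSH-type snapshot flushes the memstore itself:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(tn);
         Admin admin = conn.getAdmin()) {
      Put put = new Put(Bytes.toBytes("row-0"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      // Skip the WAL: faster, but unflushed edits are lost if the region server crashes.
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
      // Optional: force a memstore flush so the edit reaches an HFile immediately.
      admin.flush(tn);
    }
  }
}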
2024-12-03T23:29:56,231 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-03T23:29:56,232 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-03T23:29:56,239 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-03T23:29:56,246 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-03T23:29:56,250 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-03T23:29:56,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733268596250 (current time:1733268596250). 2024-12-03T23:29:56,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-03T23:29:56,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithMergeRegion VERSION not specified, setting to 2 2024-12-03T23:29:56,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-03T23:29:56,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@22410490, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:29:56,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.ClusterIdFetcher(90): Going to request ed8014902436,43753,-1 for getting cluster id 2024-12-03T23:29:56,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T23:29:56,252 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '365bf15c-9cbe-4838-a767-ac7274dcdbe4' 2024-12-03T23:29:56,252 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T23:29:56,252 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "365bf15c-9cbe-4838-a767-ac7274dcdbe4" 2024-12-03T23:29:56,252 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@68bc37bf, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:29:56,252 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [ed8014902436,43753,-1] 2024-12-03T23:29:56,253 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T23:29:56,253 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:29:56,254 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39576, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T23:29:56,254 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7746e604, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:29:56,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T23:29:56,255 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ed8014902436,37351,1733268370600, seqNum=-1] 2024-12-03T23:29:56,256 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T23:29:56,257 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60170, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T23:29:56,258 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753. 
2024-12-03T23:29:56,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-03T23:29:56,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:29:56,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:29:56,258 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-03T23:29:56,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@25302c80, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:29:56,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.ClusterIdFetcher(90): Going to request ed8014902436,43753,-1 for getting cluster id 2024-12-03T23:29:56,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T23:29:56,259 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '365bf15c-9cbe-4838-a767-ac7274dcdbe4' 2024-12-03T23:29:56,260 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T23:29:56,260 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "365bf15c-9cbe-4838-a767-ac7274dcdbe4" 2024-12-03T23:29:56,260 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4b07c9ae, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:29:56,260 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [ed8014902436,43753,-1] 2024-12-03T23:29:56,260 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T23:29:56,260 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:29:56,261 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39586, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T23:29:56,262 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1fca2a37, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:29:56,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T23:29:56,263 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ed8014902436,37351,1733268370600, seqNum=-1] 2024-12-03T23:29:56,263 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T23:29:56,264 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60184, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T23:29:56,266 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion', locateType=CURRENT is [region=hbase:acl,,1733268373530.94ece278d222af9bae70064026dc24ae., hostname=ed8014902436,37351,1733268370600, seqNum=2] 2024-12-03T23:29:56,268 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753. 
2024-12-03T23:29:56,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor256.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-03T23:29:56,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:29:56,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:29:56,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-12-03T23:29:56,269 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-03T23:29:56,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
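The second snapshot request (snaptb0-testExportFileSystemStateWithMergeRegion) passes the same validation path and is about to be registered as pid=140. Once these procedures finish, the resulting snapshots can be inspected from a client; a brief sketch using Admin.listSnapshots, with the name pattern assumed for illustration:

import java.util.regex.Pattern;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class ListSnapshotsExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Both snapshots taken in this part of the test share the same table-name suffix.
      for (SnapshotDescription sd :
          admin.listSnapshots(Pattern.compile(".*-testExportFileSystemStateWithMergeRegion"))) {
        System.out.println(sd.getName() + " on table " + sd.getTableName());
      }
    }
  }
}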
2024-12-03T23:29:56,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure2.ProcedureExecutor(1139): Stored pid=140, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-03T23:29:56,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 140 2024-12-03T23:29:56,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=140 2024-12-03T23:29:56,271 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-03T23:29:56,272 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-03T23:29:56,274 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-03T23:29:56,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742184_1360 (size=210) 2024-12-03T23:29:56,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742184_1360 (size=210) 2024-12-03T23:29:56,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742184_1360 (size=210) 2024-12-03T23:29:56,294 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-03T23:29:56,294 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=141, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 44fc121d055419da5089ab46382aee4c}, {pid=142, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure cb8fd3807df9534ef4d2d133dcd44fc7}] 2024-12-03T23:29:56,299 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=142, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure cb8fd3807df9534ef4d2d133dcd44fc7 2024-12-03T23:29:56,299 INFO [PEWorker-1 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=141, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 44fc121d055419da5089ab46382aee4c 2024-12-03T23:29:56,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=140 2024-12-03T23:29:56,451 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39085 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=141 2024-12-03T23:29:56,451 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37351 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=142 2024-12-03T23:29:56,451 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1733268595235.cb8fd3807df9534ef4d2d133dcd44fc7. 2024-12-03T23:29:56,451 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1733268595235.44fc121d055419da5089ab46382aee4c. 2024-12-03T23:29:56,451 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(2902): Flushing 44fc121d055419da5089ab46382aee4c 1/1 column families, dataSize=199 B heapSize=688 B 2024-12-03T23:29:56,451 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegion(2902): Flushing cb8fd3807df9534ef4d2d133dcd44fc7 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-12-03T23:29:56,467 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203fb39ee1cf9cc49bfad2f8e1fdba375f8_44fc121d055419da5089ab46382aee4c is 71, key is 0be11ed488bc0528840058314beb84c5/cf:q/1733268596225/Put/seqid=0 2024-12-03T23:29:56,468 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202412031c48078c116840e680be56bb13a6d227_cb8fd3807df9534ef4d2d133dcd44fc7 is 71, key is 119f752ab4c2915d446e81f14fa337d6/cf:q/1733268596226/Put/seqid=0 2024-12-03T23:29:56,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742185_1361 (size=5102) 2024-12-03T23:29:56,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742185_1361 (size=5102) 2024-12-03T23:29:56,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742185_1361 (size=5102) 2024-12-03T23:29:56,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742186_1362 (size=8171) 2024-12-03T23:29:56,474 DEBUG 
[RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:29:56,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742186_1362 (size=8171) 2024-12-03T23:29:56,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742186_1362 (size=8171) 2024-12-03T23:29:56,475 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:29:56,478 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203fb39ee1cf9cc49bfad2f8e1fdba375f8_44fc121d055419da5089ab46382aee4c to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/d41d8cd98f00b204e9800998ecf8427e20241203fb39ee1cf9cc49bfad2f8e1fdba375f8_44fc121d055419da5089ab46382aee4c 2024-12-03T23:29:56,479 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202412031c48078c116840e680be56bb13a6d227_cb8fd3807df9534ef4d2d133dcd44fc7 to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/c4ca4238a0b923820dcc509a6f75849b202412031c48078c116840e680be56bb13a6d227_cb8fd3807df9534ef4d2d133dcd44fc7 2024-12-03T23:29:56,479 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion/44fc121d055419da5089ab46382aee4c/.tmp/cf/27c20189367a4626a09b4e96e547cf1a, store: [table=testtb-testExportFileSystemStateWithMergeRegion family=cf region=44fc121d055419da5089ab46382aee4c] 2024-12-03T23:29:56,479 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion/cb8fd3807df9534ef4d2d133dcd44fc7/.tmp/cf/f7b0deb481ea488cbe6cfa42358d682a, store: [table=testtb-testExportFileSystemStateWithMergeRegion family=cf region=cb8fd3807df9534ef4d2d133dcd44fc7] 2024-12-03T23:29:56,480 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion/44fc121d055419da5089ab46382aee4c/.tmp/cf/27c20189367a4626a09b4e96e547cf1a is 224, key is 062eead516b7da64c6425e66c710254ee/cf:q/1733268596225/Put/seqid=0 2024-12-03T23:29:56,480 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion/cb8fd3807df9534ef4d2d133dcd44fc7/.tmp/cf/f7b0deb481ea488cbe6cfa42358d682a is 224, key is 1b311e7152d10159c23ab028b8c7b3da8/cf:q/1733268596226/Put/seqid=0 2024-12-03T23:29:56,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742187_1363 (size=15717) 2024-12-03T23:29:56,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742187_1363 (size=15717) 2024-12-03T23:29:56,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742187_1363 (size=15717) 2024-12-03T23:29:56,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742188_1364 (size=5978) 2024-12-03T23:29:56,499 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion/cb8fd3807df9534ef4d2d133dcd44fc7/.tmp/cf/f7b0deb481ea488cbe6cfa42358d682a 2024-12-03T23:29:56,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742188_1364 (size=5978) 2024-12-03T23:29:56,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742188_1364 (size=5978) 2024-12-03T23:29:56,503 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion/cb8fd3807df9534ef4d2d133dcd44fc7/.tmp/cf/f7b0deb481ea488cbe6cfa42358d682a as hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion/cb8fd3807df9534ef4d2d133dcd44fc7/cf/f7b0deb481ea488cbe6cfa42358d682a 2024-12-03T23:29:56,508 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion/cb8fd3807df9534ef4d2d133dcd44fc7/cf/f7b0deb481ea488cbe6cfa42358d682a, entries=47, sequenceid=6, filesize=15.3 K 2024-12-03T23:29:56,509 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegion(3140): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 
KB/7008, currentSize=0 B/0 for cb8fd3807df9534ef4d2d133dcd44fc7 in 58ms, sequenceid=6, compaction requested=false 2024-12-03T23:29:56,509 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion' 2024-12-03T23:29:56,509 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegion(2603): Flush status journal for cb8fd3807df9534ef4d2d133dcd44fc7: 2024-12-03T23:29:56,509 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,1,1733268595235.cb8fd3807df9534ef4d2d133dcd44fc7. for snaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-03T23:29:56,509 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,1,1733268595235.cb8fd3807df9534ef4d2d133dcd44fc7.' region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-03T23:29:56,509 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-03T23:29:56,509 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion/cb8fd3807df9534ef4d2d133dcd44fc7/cf/f7b0deb481ea488cbe6cfa42358d682a] hfiles 2024-12-03T23:29:56,509 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion/cb8fd3807df9534ef4d2d133dcd44fc7/cf/f7b0deb481ea488cbe6cfa42358d682a for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-03T23:29:56,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742189_1365 (size=125) 2024-12-03T23:29:56,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742189_1365 (size=125) 2024-12-03T23:29:56,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742189_1365 (size=125) 2024-12-03T23:29:56,516 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1733268595235.cb8fd3807df9534ef4d2d133dcd44fc7. 
2024-12-03T23:29:56,516 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=142 2024-12-03T23:29:56,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.HMaster(4169): Remote procedure done, pid=142 2024-12-03T23:29:56,517 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion on region cb8fd3807df9534ef4d2d133dcd44fc7 2024-12-03T23:29:56,517 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=142, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure cb8fd3807df9534ef4d2d133dcd44fc7 2024-12-03T23:29:56,519 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=142, ppid=140, state=SUCCESS, hasLock=false; SnapshotRegionProcedure cb8fd3807df9534ef4d2d133dcd44fc7 in 224 msec 2024-12-03T23:29:56,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=140 2024-12-03T23:29:56,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=140 2024-12-03T23:29:56,899 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=199, hasBloomFilter=true, into tmp file hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion/44fc121d055419da5089ab46382aee4c/.tmp/cf/27c20189367a4626a09b4e96e547cf1a 2024-12-03T23:29:56,904 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion/44fc121d055419da5089ab46382aee4c/.tmp/cf/27c20189367a4626a09b4e96e547cf1a as hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion/44fc121d055419da5089ab46382aee4c/cf/27c20189367a4626a09b4e96e547cf1a 2024-12-03T23:29:56,908 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion/44fc121d055419da5089ab46382aee4c/cf/27c20189367a4626a09b4e96e547cf1a, entries=3, sequenceid=6, filesize=5.8 K 2024-12-03T23:29:56,909 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(3140): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for 44fc121d055419da5089ab46382aee4c in 458ms, sequenceid=6, compaction requested=false 2024-12-03T23:29:56,909 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(2603): Flush status journal for 44fc121d055419da5089ab46382aee4c: 2024-12-03T23:29:56,909 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] 
regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,,1733268595235.44fc121d055419da5089ab46382aee4c. for snaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-03T23:29:56,909 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,,1733268595235.44fc121d055419da5089ab46382aee4c.' region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-03T23:29:56,909 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-03T23:29:56,909 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion/44fc121d055419da5089ab46382aee4c/cf/27c20189367a4626a09b4e96e547cf1a] hfiles 2024-12-03T23:29:56,909 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion/44fc121d055419da5089ab46382aee4c/cf/27c20189367a4626a09b4e96e547cf1a for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-03T23:29:56,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742190_1366 (size=125) 2024-12-03T23:29:56,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742190_1366 (size=125) 2024-12-03T23:29:56,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742190_1366 (size=125) 2024-12-03T23:29:56,917 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1733268595235.44fc121d055419da5089ab46382aee4c. 
2024-12-03T23:29:56,917 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=141 2024-12-03T23:29:56,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.HMaster(4169): Remote procedure done, pid=141 2024-12-03T23:29:56,918 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion on region 44fc121d055419da5089ab46382aee4c 2024-12-03T23:29:56,918 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=141, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 44fc121d055419da5089ab46382aee4c 2024-12-03T23:29:56,920 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=141, resume processing ppid=140 2024-12-03T23:29:56,920 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=141, ppid=140, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 44fc121d055419da5089ab46382aee4c in 625 msec 2024-12-03T23:29:56,920 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-03T23:29:56,921 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-03T23:29:56,921 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-03T23:29:56,921 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-03T23:29:56,922 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:29:56,922 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/c4ca4238a0b923820dcc509a6f75849b202412031c48078c116840e680be56bb13a6d227_cb8fd3807df9534ef4d2d133dcd44fc7, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/d41d8cd98f00b204e9800998ecf8427e20241203fb39ee1cf9cc49bfad2f8e1fdba375f8_44fc121d055419da5089ab46382aee4c] hfiles 2024-12-03T23:29:56,923 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/c4ca4238a0b923820dcc509a6f75849b202412031c48078c116840e680be56bb13a6d227_cb8fd3807df9534ef4d2d133dcd44fc7 2024-12-03T23:29:56,923 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/d41d8cd98f00b204e9800998ecf8427e20241203fb39ee1cf9cc49bfad2f8e1fdba375f8_44fc121d055419da5089ab46382aee4c 2024-12-03T23:29:56,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742191_1367 (size=309) 2024-12-03T23:29:56,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742191_1367 (size=309) 2024-12-03T23:29:56,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742191_1367 (size=309) 2024-12-03T23:29:56,929 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-03T23:29:56,929 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-03T23:29:56,930 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-03T23:29:56,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742192_1368 (size=1023) 2024-12-03T23:29:56,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:37061 is added to blk_1073742192_1368 (size=1023) 2024-12-03T23:29:56,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742192_1368 (size=1023) 2024-12-03T23:29:56,944 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-03T23:29:56,949 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-03T23:29:56,949 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-03T23:29:56,951 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-03T23:29:56,951 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 140 2024-12-03T23:29:56,952 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=140, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } in 682 msec 2024-12-03T23:29:57,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=140 2024-12-03T23:29:57,409 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-12-03T23:29:57,412 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45664, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-03T23:29:57,412 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42992, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-03T23:29:57,412 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60196, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), 
service=AdminService 2024-12-03T23:29:57,414 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithMergeRegion-1', {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-03T23:29:57,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure2.ProcedureExecutor(1139): Stored pid=143, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-03T23:29:57,416 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_PRE_OPERATION 2024-12-03T23:29:57,416 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:29:57,416 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithMergeRegion-1" procId is: 143 2024-12-03T23:29:57,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-12-03T23:29:57,416 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-03T23:29:57,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742193_1369 (size=399) 2024-12-03T23:29:57,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742193_1369 (size=399) 2024-12-03T23:29:57,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742193_1369 (size=399) 2024-12-03T23:29:57,424 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 8264560b9123327c4d17ce9a7a623e39, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,2,1733268597413.8264560b9123327c4d17ce9a7a623e39.', STARTKEY => '2', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion-1', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b 2024-12-03T23:29:57,424 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] 
regionserver.HRegion(7572): creating {ENCODED => d1b9eb3a4cc2f57ef7032cb8ee42dc1b, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733268597413.d1b9eb3a4cc2f57ef7032cb8ee42dc1b.', STARTKEY => '', ENDKEY => '2'}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion-1', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b 2024-12-03T23:29:57,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742195_1371 (size=85) 2024-12-03T23:29:57,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742195_1371 (size=85) 2024-12-03T23:29:57,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742195_1371 (size=85) 2024-12-03T23:29:57,430 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1733268597413.d1b9eb3a4cc2f57ef7032cb8ee42dc1b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T23:29:57,430 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1722): Closing d1b9eb3a4cc2f57ef7032cb8ee42dc1b, disabling compactions & flushes 2024-12-03T23:29:57,430 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1733268597413.d1b9eb3a4cc2f57ef7032cb8ee42dc1b. 2024-12-03T23:29:57,430 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733268597413.d1b9eb3a4cc2f57ef7032cb8ee42dc1b. 2024-12-03T23:29:57,430 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733268597413.d1b9eb3a4cc2f57ef7032cb8ee42dc1b. after waiting 0 ms 2024-12-03T23:29:57,430 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1733268597413.d1b9eb3a4cc2f57ef7032cb8ee42dc1b. 2024-12-03T23:29:57,430 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1733268597413.d1b9eb3a4cc2f57ef7032cb8ee42dc1b. 
2024-12-03T23:29:57,430 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1676): Region close journal for d1b9eb3a4cc2f57ef7032cb8ee42dc1b: Waiting for close lock at 1733268597430Disabling compacts and flushes for region at 1733268597430Disabling writes for close at 1733268597430Writing region close event to WAL at 1733268597430Closed at 1733268597430 2024-12-03T23:29:57,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742194_1370 (size=85) 2024-12-03T23:29:57,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742194_1370 (size=85) 2024-12-03T23:29:57,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742194_1370 (size=85) 2024-12-03T23:29:57,438 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,2,1733268597413.8264560b9123327c4d17ce9a7a623e39.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T23:29:57,438 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1722): Closing 8264560b9123327c4d17ce9a7a623e39, disabling compactions & flushes 2024-12-03T23:29:57,438 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,2,1733268597413.8264560b9123327c4d17ce9a7a623e39. 2024-12-03T23:29:57,438 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1733268597413.8264560b9123327c4d17ce9a7a623e39. 2024-12-03T23:29:57,438 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1733268597413.8264560b9123327c4d17ce9a7a623e39. after waiting 0 ms 2024-12-03T23:29:57,438 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,2,1733268597413.8264560b9123327c4d17ce9a7a623e39. 2024-12-03T23:29:57,438 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,2,1733268597413.8264560b9123327c4d17ce9a7a623e39. 
2024-12-03T23:29:57,438 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1676): Region close journal for 8264560b9123327c4d17ce9a7a623e39: Waiting for close lock at 1733268597438Disabling compacts and flushes for region at 1733268597438Disabling writes for close at 1733268597438Writing region close event to WAL at 1733268597438Closed at 1733268597438 2024-12-03T23:29:57,439 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_ADD_TO_META 2024-12-03T23:29:57,439 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1733268597413.d1b9eb3a4cc2f57ef7032cb8ee42dc1b.","families":{"info":[{"qualifier":"regioninfo","vlen":84,"tag":[],"timestamp":"1733268597439"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733268597439"}]},"ts":"1733268597439"} 2024-12-03T23:29:57,440 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,2,1733268597413.8264560b9123327c4d17ce9a7a623e39.","families":{"info":[{"qualifier":"regioninfo","vlen":84,"tag":[],"timestamp":"1733268597439"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733268597439"}]},"ts":"1733268597439"} 2024-12-03T23:29:57,442 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-03T23:29:57,443 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-03T23:29:57,443 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733268597443"}]},"ts":"1733268597443"} 2024-12-03T23:29:57,445 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=ENABLING in hbase:meta 2024-12-03T23:29:57,445 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {ed8014902436=0} racks are {/default-rack=0} 2024-12-03T23:29:57,446 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-03T23:29:57,446 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-03T23:29:57,446 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-03T23:29:57,446 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-03T23:29:57,446 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-03T23:29:57,446 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-03T23:29:57,446 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-03T23:29:57,446 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-03T23:29:57,446 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-03T23:29:57,446 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-03T23:29:57,447 INFO [PEWorker-3 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=d1b9eb3a4cc2f57ef7032cb8ee42dc1b, ASSIGN}, {pid=145, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=8264560b9123327c4d17ce9a7a623e39, ASSIGN}] 2024-12-03T23:29:57,448 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=145, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=8264560b9123327c4d17ce9a7a623e39, ASSIGN 2024-12-03T23:29:57,448 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=d1b9eb3a4cc2f57ef7032cb8ee42dc1b, ASSIGN 2024-12-03T23:29:57,448 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=d1b9eb3a4cc2f57ef7032cb8ee42dc1b, ASSIGN; state=OFFLINE, location=ed8014902436,39085,1733268370401; forceNewPlan=false, retain=false 2024-12-03T23:29:57,448 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=145, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=8264560b9123327c4d17ce9a7a623e39, ASSIGN; state=OFFLINE, location=ed8014902436,37351,1733268370600; forceNewPlan=false, retain=false 2024-12-03T23:29:57,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-12-03T23:29:57,529 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_0/usercache/jenkins/appcache/application_1733268377251_0006/container_1733268377251_0006_01_000003/launch_container.sh] 2024-12-03T23:29:57,529 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_0/usercache/jenkins/appcache/application_1733268377251_0006/container_1733268377251_0006_01_000003/container_tokens] 2024-12-03T23:29:57,529 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_0/usercache/jenkins/appcache/application_1733268377251_0006/container_1733268377251_0006_01_000003/sysfs] 2024-12-03T23:29:57,599 INFO [ed8014902436:43753 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 
2 retained the pre-restart assignment. 2024-12-03T23:29:57,599 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=144 updating hbase:meta row=d1b9eb3a4cc2f57ef7032cb8ee42dc1b, regionState=OPENING, regionLocation=ed8014902436,39085,1733268370401 2024-12-03T23:29:57,599 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=145 updating hbase:meta row=8264560b9123327c4d17ce9a7a623e39, regionState=OPENING, regionLocation=ed8014902436,37351,1733268370600 2024-12-03T23:29:57,601 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=145, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=8264560b9123327c4d17ce9a7a623e39, ASSIGN because future has completed 2024-12-03T23:29:57,601 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=146, ppid=145, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8264560b9123327c4d17ce9a7a623e39, server=ed8014902436,37351,1733268370600}] 2024-12-03T23:29:57,602 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=d1b9eb3a4cc2f57ef7032cb8ee42dc1b, ASSIGN because future has completed 2024-12-03T23:29:57,602 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=147, ppid=144, state=RUNNABLE, hasLock=false; OpenRegionProcedure d1b9eb3a4cc2f57ef7032cb8ee42dc1b, server=ed8014902436,39085,1733268370401}] 2024-12-03T23:29:57,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-12-03T23:29:57,755 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,2,1733268597413.8264560b9123327c4d17ce9a7a623e39. 2024-12-03T23:29:57,755 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7752): Opening region: {ENCODED => 8264560b9123327c4d17ce9a7a623e39, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,2,1733268597413.8264560b9123327c4d17ce9a7a623e39.', STARTKEY => '2', ENDKEY => ''} 2024-12-03T23:29:57,755 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,,1733268597413.d1b9eb3a4cc2f57ef7032cb8ee42dc1b. 2024-12-03T23:29:57,756 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(7752): Opening region: {ENCODED => d1b9eb3a4cc2f57ef7032cb8ee42dc1b, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733268597413.d1b9eb3a4cc2f57ef7032cb8ee42dc1b.', STARTKEY => '', ENDKEY => '2'} 2024-12-03T23:29:57,756 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,2,1733268597413.8264560b9123327c4d17ce9a7a623e39. 
service=AccessControlService 2024-12-03T23:29:57,756 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,,1733268597413.d1b9eb3a4cc2f57ef7032cb8ee42dc1b. service=AccessControlService 2024-12-03T23:29:57,756 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-03T23:29:57,756 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-03T23:29:57,756 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 d1b9eb3a4cc2f57ef7032cb8ee42dc1b 2024-12-03T23:29:57,756 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 8264560b9123327c4d17ce9a7a623e39 2024-12-03T23:29:57,756 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1733268597413.d1b9eb3a4cc2f57ef7032cb8ee42dc1b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T23:29:57,756 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,2,1733268597413.8264560b9123327c4d17ce9a7a623e39.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T23:29:57,756 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7794): checking encryption for 8264560b9123327c4d17ce9a7a623e39 2024-12-03T23:29:57,756 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(7794): checking encryption for d1b9eb3a4cc2f57ef7032cb8ee42dc1b 2024-12-03T23:29:57,756 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7797): checking classloading for 8264560b9123327c4d17ce9a7a623e39 2024-12-03T23:29:57,756 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(7797): checking classloading for d1b9eb3a4cc2f57ef7032cb8ee42dc1b 2024-12-03T23:29:57,757 INFO [StoreOpener-8264560b9123327c4d17ce9a7a623e39-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 8264560b9123327c4d17ce9a7a623e39 2024-12-03T23:29:57,757 INFO [StoreOpener-d1b9eb3a4cc2f57ef7032cb8ee42dc1b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, 
cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region d1b9eb3a4cc2f57ef7032cb8ee42dc1b 2024-12-03T23:29:57,759 INFO [StoreOpener-8264560b9123327c4d17ce9a7a623e39-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8264560b9123327c4d17ce9a7a623e39 columnFamilyName cf 2024-12-03T23:29:57,759 INFO [StoreOpener-d1b9eb3a4cc2f57ef7032cb8ee42dc1b-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d1b9eb3a4cc2f57ef7032cb8ee42dc1b columnFamilyName cf 2024-12-03T23:29:57,759 DEBUG [StoreOpener-8264560b9123327c4d17ce9a7a623e39-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:29:57,759 DEBUG [StoreOpener-d1b9eb3a4cc2f57ef7032cb8ee42dc1b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:29:57,759 INFO [StoreOpener-8264560b9123327c4d17ce9a7a623e39-1 {}] regionserver.HStore(327): Store=8264560b9123327c4d17ce9a7a623e39/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T23:29:57,759 INFO [StoreOpener-d1b9eb3a4cc2f57ef7032cb8ee42dc1b-1 {}] regionserver.HStore(327): Store=d1b9eb3a4cc2f57ef7032cb8ee42dc1b/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T23:29:57,759 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1038): replaying wal for 8264560b9123327c4d17ce9a7a623e39 2024-12-03T23:29:57,759 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1038): replaying wal for d1b9eb3a4cc2f57ef7032cb8ee42dc1b 2024-12-03T23:29:57,760 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d1b9eb3a4cc2f57ef7032cb8ee42dc1b 2024-12-03T23:29:57,760 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8264560b9123327c4d17ce9a7a623e39 2024-12-03T23:29:57,760 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8264560b9123327c4d17ce9a7a623e39 2024-12-03T23:29:57,760 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d1b9eb3a4cc2f57ef7032cb8ee42dc1b 2024-12-03T23:29:57,760 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1048): stopping wal replay for 8264560b9123327c4d17ce9a7a623e39 2024-12-03T23:29:57,760 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1060): Cleaning up temporary data for 8264560b9123327c4d17ce9a7a623e39 2024-12-03T23:29:57,760 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1048): stopping wal replay for d1b9eb3a4cc2f57ef7032cb8ee42dc1b 2024-12-03T23:29:57,760 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1060): Cleaning up temporary data for d1b9eb3a4cc2f57ef7032cb8ee42dc1b 2024-12-03T23:29:57,761 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1093): writing seq id for 8264560b9123327c4d17ce9a7a623e39 2024-12-03T23:29:57,761 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1093): writing seq id for d1b9eb3a4cc2f57ef7032cb8ee42dc1b 2024-12-03T23:29:57,763 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d1b9eb3a4cc2f57ef7032cb8ee42dc1b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T23:29:57,763 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8264560b9123327c4d17ce9a7a623e39/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T23:29:57,763 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1114): Opened d1b9eb3a4cc2f57ef7032cb8ee42dc1b; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, 
ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71052015, jitterRate=0.05875752866268158}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-03T23:29:57,763 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1114): Opened 8264560b9123327c4d17ce9a7a623e39; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71240162, jitterRate=0.061561137437820435}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-03T23:29:57,763 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1122): Running coprocessor post-open hooks for d1b9eb3a4cc2f57ef7032cb8ee42dc1b 2024-12-03T23:29:57,763 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 8264560b9123327c4d17ce9a7a623e39 2024-12-03T23:29:57,764 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1006): Region open journal for d1b9eb3a4cc2f57ef7032cb8ee42dc1b: Running coprocessor pre-open hook at 1733268597756Writing region info on filesystem at 1733268597756Initializing all the Stores at 1733268597757 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733268597757Cleaning up temporary data from old regions at 1733268597760 (+3 ms)Running coprocessor post-open hooks at 1733268597763 (+3 ms)Region opened successfully at 1733268597764 (+1 ms) 2024-12-03T23:29:57,764 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1006): Region open journal for 8264560b9123327c4d17ce9a7a623e39: Running coprocessor pre-open hook at 1733268597756Writing region info on filesystem at 1733268597756Initializing all the Stores at 1733268597757 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733268597757Cleaning up temporary data from old regions at 1733268597760 (+3 ms)Running coprocessor post-open hooks at 1733268597763 (+3 ms)Region opened successfully at 1733268597764 (+1 ms) 2024-12-03T23:29:57,764 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,2,1733268597413.8264560b9123327c4d17ce9a7a623e39., pid=146, masterSystemTime=1733268597753 2024-12-03T23:29:57,764 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,,1733268597413.d1b9eb3a4cc2f57ef7032cb8ee42dc1b., pid=147, masterSystemTime=1733268597753 2024-12-03T23:29:57,766 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] 
regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,2,1733268597413.8264560b9123327c4d17ce9a7a623e39. 2024-12-03T23:29:57,766 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,2,1733268597413.8264560b9123327c4d17ce9a7a623e39. 2024-12-03T23:29:57,766 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=145 updating hbase:meta row=8264560b9123327c4d17ce9a7a623e39, regionState=OPEN, openSeqNum=2, regionLocation=ed8014902436,37351,1733268370600 2024-12-03T23:29:57,766 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,,1733268597413.d1b9eb3a4cc2f57ef7032cb8ee42dc1b. 2024-12-03T23:29:57,766 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,,1733268597413.d1b9eb3a4cc2f57ef7032cb8ee42dc1b. 2024-12-03T23:29:57,767 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=144 updating hbase:meta row=d1b9eb3a4cc2f57ef7032cb8ee42dc1b, regionState=OPEN, openSeqNum=2, regionLocation=ed8014902436,39085,1733268370401 2024-12-03T23:29:57,768 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=146, ppid=145, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8264560b9123327c4d17ce9a7a623e39, server=ed8014902436,37351,1733268370600 because future has completed 2024-12-03T23:29:57,768 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=147, ppid=144, state=RUNNABLE, hasLock=false; OpenRegionProcedure d1b9eb3a4cc2f57ef7032cb8ee42dc1b, server=ed8014902436,39085,1733268370401 because future has completed 2024-12-03T23:29:57,770 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=146, resume processing ppid=145 2024-12-03T23:29:57,770 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=146, ppid=145, state=SUCCESS, hasLock=false; OpenRegionProcedure 8264560b9123327c4d17ce9a7a623e39, server=ed8014902436,37351,1733268370600 in 167 msec 2024-12-03T23:29:57,771 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=147, resume processing ppid=144 2024-12-03T23:29:57,771 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=145, ppid=143, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=8264560b9123327c4d17ce9a7a623e39, ASSIGN in 323 msec 2024-12-03T23:29:57,771 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=147, ppid=144, state=SUCCESS, hasLock=false; OpenRegionProcedure d1b9eb3a4cc2f57ef7032cb8ee42dc1b, server=ed8014902436,39085,1733268370401 in 167 msec 2024-12-03T23:29:57,772 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=144, resume processing ppid=143 2024-12-03T23:29:57,772 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=144, ppid=143, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=d1b9eb3a4cc2f57ef7032cb8ee42dc1b, ASSIGN in 324 msec 2024-12-03T23:29:57,773 
INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-03T23:29:57,773 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733268597773"}]},"ts":"1733268597773"} 2024-12-03T23:29:57,774 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=ENABLED in hbase:meta 2024-12-03T23:29:57,775 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_POST_OPERATION 2024-12-03T23:29:57,775 DEBUG [PEWorker-3 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithMergeRegion-1 jenkins: RWXCA 2024-12-03T23:29:57,778 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37351 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion-1], kv [jenkins: RWXCA] 2024-12-03T23:29:57,817 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:29:57,817 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:29:57,817 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:29:57,817 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:29:57,826 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-03T23:29:57,826 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-03T23:29:57,826 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-03T23:29:57,826 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data 
PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-03T23:29:57,826 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-03T23:29:57,826 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-03T23:29:57,826 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-03T23:29:57,826 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-03T23:29:57,827 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=143, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 411 msec 2024-12-03T23:29:58,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-12-03T23:29:58,040 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-12-03T23:29:58,042 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion-1', row='1', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion-1,,1733268597413.d1b9eb3a4cc2f57ef7032cb8ee42dc1b., hostname=ed8014902436,39085,1733268370401, seqNum=2] 2024-12-03T23:29:58,045 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion-1', row='2', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion-1,2,1733268597413.8264560b9123327c4d17ce9a7a623e39., hostname=ed8014902436,37351,1733268370600, seqNum=2] 2024-12-03T23:29:58,047 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion-1,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion-1 ,, for max=2147483647 with caching=100 2024-12-03T23:29:58,059 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.HMaster$2(2278): Client=jenkins//172.17.0.2 merge regions [d1b9eb3a4cc2f57ef7032cb8ee42dc1b, 8264560b9123327c4d17ce9a7a623e39] 2024-12-03T23:29:58,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure2.ProcedureExecutor(1139): Stored pid=148, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure 
table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[d1b9eb3a4cc2f57ef7032cb8ee42dc1b, 8264560b9123327c4d17ce9a7a623e39], force=true 2024-12-03T23:29:58,064 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=148, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[d1b9eb3a4cc2f57ef7032cb8ee42dc1b, 8264560b9123327c4d17ce9a7a623e39], force=true 2024-12-03T23:29:58,064 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=148, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[d1b9eb3a4cc2f57ef7032cb8ee42dc1b, 8264560b9123327c4d17ce9a7a623e39], force=true 2024-12-03T23:29:58,064 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=148, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[d1b9eb3a4cc2f57ef7032cb8ee42dc1b, 8264560b9123327c4d17ce9a7a623e39], force=true 2024-12-03T23:29:58,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-12-03T23:29:58,071 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=149, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=d1b9eb3a4cc2f57ef7032cb8ee42dc1b, UNASSIGN}, {pid=150, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=8264560b9123327c4d17ce9a7a623e39, UNASSIGN}] 2024-12-03T23:29:58,071 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=149, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=d1b9eb3a4cc2f57ef7032cb8ee42dc1b, UNASSIGN 2024-12-03T23:29:58,071 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=150, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=8264560b9123327c4d17ce9a7a623e39, UNASSIGN 2024-12-03T23:29:58,072 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=150 updating hbase:meta row=8264560b9123327c4d17ce9a7a623e39, regionState=CLOSING, regionLocation=ed8014902436,37351,1733268370600 2024-12-03T23:29:58,072 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=149 updating hbase:meta row=d1b9eb3a4cc2f57ef7032cb8ee42dc1b, regionState=CLOSING, regionLocation=ed8014902436,39085,1733268370401 2024-12-03T23:29:58,074 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=150, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=8264560b9123327c4d17ce9a7a623e39, UNASSIGN because future has completed 2024-12-03T23:29:58,074 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-03T23:29:58,074 INFO [PEWorker-5 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=151, ppid=150, state=RUNNABLE, hasLock=false; CloseRegionProcedure 8264560b9123327c4d17ce9a7a623e39, server=ed8014902436,37351,1733268370600}] 2024-12-03T23:29:58,074 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=149, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=d1b9eb3a4cc2f57ef7032cb8ee42dc1b, UNASSIGN because future has completed 2024-12-03T23:29:58,075 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-03T23:29:58,075 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=152, ppid=149, state=RUNNABLE, hasLock=false; CloseRegionProcedure d1b9eb3a4cc2f57ef7032cb8ee42dc1b, server=ed8014902436,39085,1733268370401}] 2024-12-03T23:29:58,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-12-03T23:29:58,226 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(122): Close 8264560b9123327c4d17ce9a7a623e39 2024-12-03T23:29:58,226 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] handler.UnassignRegionHandler(122): Close d1b9eb3a4cc2f57ef7032cb8ee42dc1b 2024-12-03T23:29:58,227 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-12-03T23:29:58,227 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-12-03T23:29:58,227 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1722): Closing d1b9eb3a4cc2f57ef7032cb8ee42dc1b, disabling compactions & flushes 2024-12-03T23:29:58,227 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1722): Closing 8264560b9123327c4d17ce9a7a623e39, disabling compactions & flushes 2024-12-03T23:29:58,227 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1733268597413.d1b9eb3a4cc2f57ef7032cb8ee42dc1b. 2024-12-03T23:29:58,227 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,2,1733268597413.8264560b9123327c4d17ce9a7a623e39. 2024-12-03T23:29:58,227 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733268597413.d1b9eb3a4cc2f57ef7032cb8ee42dc1b. 2024-12-03T23:29:58,227 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1733268597413.8264560b9123327c4d17ce9a7a623e39. 
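
The merge traced above (pid=148, MergeTableRegionsProcedure with force=true, followed by UNASSIGN/Close subprocedures for d1b9eb3a4cc2f57ef7032cb8ee42dc1b and 8264560b9123327c4d17ce9a7a623e39) is the server-side view of a client Admin call. Below is a minimal sketch of how such a forced merge can be issued through the public HBase Admin API; it is not taken from the test source, and the class name, region-selection logic, and configuration setup are illustrative assumptions.

```java
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionInfo;

public class MergeRegionsSketch {
  public static void main(String[] args) throws Exception {
    // Illustrative: a client Configuration pointing at the (mini) cluster.
    Configuration conf = HBaseConfiguration.create();
    TableName table =
        TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // The table in the log has exactly two regions ('' - '2' and '2' - '').
      List<RegionInfo> regions = admin.getRegions(table);

      // Encoded region names, corresponding to d1b9eb3a... and 8264560b...
      byte[][] toMerge = new byte[][] {
          regions.get(0).getEncodedNameAsBytes(),
          regions.get(1).getEncodedNameAsBytes()
      };

      // force=true mirrors the logged "force=true"; the master then unassigns
      // and flushes both regions and opens the merged region with reference files.
      admin.mergeRegionsAsync(toMerge, true).get();
    }
  }
}
```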
2024-12-03T23:29:58,227 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733268597413.d1b9eb3a4cc2f57ef7032cb8ee42dc1b. after waiting 0 ms 2024-12-03T23:29:58,227 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1733268597413.8264560b9123327c4d17ce9a7a623e39. after waiting 0 ms 2024-12-03T23:29:58,227 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1733268597413.d1b9eb3a4cc2f57ef7032cb8ee42dc1b. 2024-12-03T23:29:58,227 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,2,1733268597413.8264560b9123327c4d17ce9a7a623e39. 2024-12-03T23:29:58,227 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(2902): Flushing 8264560b9123327c4d17ce9a7a623e39 1/1 column families, dataSize=24 B heapSize=352 B 2024-12-03T23:29:58,227 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(2902): Flushing d1b9eb3a4cc2f57ef7032cb8ee42dc1b 1/1 column families, dataSize=24 B heapSize=352 B 2024-12-03T23:29:58,256 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d1b9eb3a4cc2f57ef7032cb8ee42dc1b/.tmp/cf/c58bc2dcd78c4f04a375f5d691963020 is 28, key is 1/cf:/1733268598042/Put/seqid=0 2024-12-03T23:29:58,256 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8264560b9123327c4d17ce9a7a623e39/.tmp/cf/1b3924a59f314fb68bc589a1cb8e003c is 28, key is 2/cf:/1733268598046/Put/seqid=0 2024-12-03T23:29:58,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742197_1373 (size=4945) 2024-12-03T23:29:58,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742197_1373 (size=4945) 2024-12-03T23:29:58,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742197_1373 (size=4945) 2024-12-03T23:29:58,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742196_1372 (size=4945) 2024-12-03T23:29:58,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742196_1372 (size=4945) 2024-12-03T23:29:58,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742196_1372 (size=4945) 2024-12-03T23:29:58,262 
INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d1b9eb3a4cc2f57ef7032cb8ee42dc1b/.tmp/cf/c58bc2dcd78c4f04a375f5d691963020 2024-12-03T23:29:58,262 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8264560b9123327c4d17ce9a7a623e39/.tmp/cf/1b3924a59f314fb68bc589a1cb8e003c 2024-12-03T23:29:58,267 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8264560b9123327c4d17ce9a7a623e39/.tmp/cf/1b3924a59f314fb68bc589a1cb8e003c as hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8264560b9123327c4d17ce9a7a623e39/cf/1b3924a59f314fb68bc589a1cb8e003c 2024-12-03T23:29:58,267 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d1b9eb3a4cc2f57ef7032cb8ee42dc1b/.tmp/cf/c58bc2dcd78c4f04a375f5d691963020 as hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d1b9eb3a4cc2f57ef7032cb8ee42dc1b/cf/c58bc2dcd78c4f04a375f5d691963020 2024-12-03T23:29:58,272 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8264560b9123327c4d17ce9a7a623e39/cf/1b3924a59f314fb68bc589a1cb8e003c, entries=1, sequenceid=5, filesize=4.8 K 2024-12-03T23:29:58,272 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d1b9eb3a4cc2f57ef7032cb8ee42dc1b/cf/c58bc2dcd78c4f04a375f5d691963020, entries=1, sequenceid=5, filesize=4.8 K 2024-12-03T23:29:58,273 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(3140): Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for d1b9eb3a4cc2f57ef7032cb8ee42dc1b in 45ms, sequenceid=5, compaction requested=false 2024-12-03T23:29:58,273 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion-1' 2024-12-03T23:29:58,273 INFO 
[RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(3140): Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 8264560b9123327c4d17ce9a7a623e39 in 46ms, sequenceid=5, compaction requested=false 2024-12-03T23:29:58,278 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d1b9eb3a4cc2f57ef7032cb8ee42dc1b/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-03T23:29:58,279 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-03T23:29:58,279 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1733268597413.d1b9eb3a4cc2f57ef7032cb8ee42dc1b. 2024-12-03T23:29:58,280 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1676): Region close journal for d1b9eb3a4cc2f57ef7032cb8ee42dc1b: Waiting for close lock at 1733268598227Running coprocessor pre-close hooks at 1733268598227Disabling compacts and flushes for region at 1733268598227Disabling writes for close at 1733268598227Obtaining lock to block concurrent updates at 1733268598227Preparing flush snapshotting stores in d1b9eb3a4cc2f57ef7032cb8ee42dc1b at 1733268598227Finished memstore snapshotting testtb-testExportFileSystemStateWithMergeRegion-1,,1733268597413.d1b9eb3a4cc2f57ef7032cb8ee42dc1b., syncing WAL and waiting on mvcc, flushsize=dataSize=24, getHeapSize=336, getOffHeapSize=0, getCellsCount=1 at 1733268598227Flushing stores of testtb-testExportFileSystemStateWithMergeRegion-1,,1733268597413.d1b9eb3a4cc2f57ef7032cb8ee42dc1b. 
at 1733268598228 (+1 ms)Flushing d1b9eb3a4cc2f57ef7032cb8ee42dc1b/cf: creating writer at 1733268598228Flushing d1b9eb3a4cc2f57ef7032cb8ee42dc1b/cf: appending metadata at 1733268598256 (+28 ms)Flushing d1b9eb3a4cc2f57ef7032cb8ee42dc1b/cf: closing flushed file at 1733268598256Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3d5ac035: reopening flushed file at 1733268598266 (+10 ms)Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for d1b9eb3a4cc2f57ef7032cb8ee42dc1b in 45ms, sequenceid=5, compaction requested=false at 1733268598273 (+7 ms)Writing region close event to WAL at 1733268598274 (+1 ms)Running coprocessor post-close hooks at 1733268598279 (+5 ms)Closed at 1733268598279 2024-12-03T23:29:58,282 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] handler.UnassignRegionHandler(157): Closed d1b9eb3a4cc2f57ef7032cb8ee42dc1b 2024-12-03T23:29:58,282 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8264560b9123327c4d17ce9a7a623e39/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-03T23:29:58,283 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-03T23:29:58,283 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=149 updating hbase:meta row=d1b9eb3a4cc2f57ef7032cb8ee42dc1b, regionState=CLOSED 2024-12-03T23:29:58,283 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,2,1733268597413.8264560b9123327c4d17ce9a7a623e39. 2024-12-03T23:29:58,283 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1676): Region close journal for 8264560b9123327c4d17ce9a7a623e39: Waiting for close lock at 1733268598227Running coprocessor pre-close hooks at 1733268598227Disabling compacts and flushes for region at 1733268598227Disabling writes for close at 1733268598227Obtaining lock to block concurrent updates at 1733268598227Preparing flush snapshotting stores in 8264560b9123327c4d17ce9a7a623e39 at 1733268598227Finished memstore snapshotting testtb-testExportFileSystemStateWithMergeRegion-1,2,1733268597413.8264560b9123327c4d17ce9a7a623e39., syncing WAL and waiting on mvcc, flushsize=dataSize=24, getHeapSize=336, getOffHeapSize=0, getCellsCount=1 at 1733268598227Flushing stores of testtb-testExportFileSystemStateWithMergeRegion-1,2,1733268597413.8264560b9123327c4d17ce9a7a623e39. 
at 1733268598228 (+1 ms)Flushing 8264560b9123327c4d17ce9a7a623e39/cf: creating writer at 1733268598228Flushing 8264560b9123327c4d17ce9a7a623e39/cf: appending metadata at 1733268598256 (+28 ms)Flushing 8264560b9123327c4d17ce9a7a623e39/cf: closing flushed file at 1733268598256Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2e1fd342: reopening flushed file at 1733268598266 (+10 ms)Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 8264560b9123327c4d17ce9a7a623e39 in 46ms, sequenceid=5, compaction requested=false at 1733268598273 (+7 ms)Writing region close event to WAL at 1733268598274 (+1 ms)Running coprocessor post-close hooks at 1733268598282 (+8 ms)Closed at 1733268598283 (+1 ms) 2024-12-03T23:29:58,285 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=152, ppid=149, state=RUNNABLE, hasLock=false; CloseRegionProcedure d1b9eb3a4cc2f57ef7032cb8ee42dc1b, server=ed8014902436,39085,1733268370401 because future has completed 2024-12-03T23:29:58,285 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(157): Closed 8264560b9123327c4d17ce9a7a623e39 2024-12-03T23:29:58,286 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=150 updating hbase:meta row=8264560b9123327c4d17ce9a7a623e39, regionState=CLOSED 2024-12-03T23:29:58,287 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=151, ppid=150, state=RUNNABLE, hasLock=false; CloseRegionProcedure 8264560b9123327c4d17ce9a7a623e39, server=ed8014902436,37351,1733268370600 because future has completed 2024-12-03T23:29:58,293 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=152, resume processing ppid=149 2024-12-03T23:29:58,294 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=152, ppid=149, state=SUCCESS, hasLock=false; CloseRegionProcedure d1b9eb3a4cc2f57ef7032cb8ee42dc1b, server=ed8014902436,39085,1733268370401 in 211 msec 2024-12-03T23:29:58,295 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=151, resume processing ppid=150 2024-12-03T23:29:58,295 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=151, ppid=150, state=SUCCESS, hasLock=false; CloseRegionProcedure 8264560b9123327c4d17ce9a7a623e39, server=ed8014902436,37351,1733268370600 in 218 msec 2024-12-03T23:29:58,296 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=149, ppid=148, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=d1b9eb3a4cc2f57ef7032cb8ee42dc1b, UNASSIGN in 222 msec 2024-12-03T23:29:58,298 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=150, resume processing ppid=148 2024-12-03T23:29:58,298 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=150, ppid=148, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=8264560b9123327c4d17ce9a7a623e39, UNASSIGN in 224 msec 2024-12-03T23:29:58,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742198_1374 (size=84) 2024-12-03T23:29:58,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to 
blk_1073742198_1374 (size=84) 2024-12-03T23:29:58,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742198_1374 (size=84) 2024-12-03T23:29:58,322 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:29:58,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742199_1375 (size=20) 2024-12-03T23:29:58,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742199_1375 (size=20) 2024-12-03T23:29:58,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742199_1375 (size=20) 2024-12-03T23:29:58,334 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:29:58,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742200_1376 (size=21) 2024-12-03T23:29:58,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742200_1376 (size=21) 2024-12-03T23:29:58,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742200_1376 (size=21) 2024-12-03T23:29:58,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742201_1377 (size=84) 2024-12-03T23:29:58,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742201_1377 (size=84) 2024-12-03T23:29:58,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742201_1377 (size=84) 2024-12-03T23:29:58,357 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:29:58,365 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0f2277f66421dd9546af949a5d3e0a94/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=-1 2024-12-03T23:29:58,367 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1733268597413.d1b9eb3a4cc2f57ef7032cb8ee42dc1b.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-12-03T23:29:58,367 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,2,1733268597413.8264560b9123327c4d17ce9a7a623e39.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-12-03T23:29:58,367 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Put 
{"totalColumns":7,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1733268597414.0f2277f66421dd9546af949a5d3e0a94.","families":{"info":[{"qualifier":"regioninfo","vlen":83,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"merge0000","vlen":84,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"merge0001","vlen":84,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-12-03T23:29:58,371 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=153, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=0f2277f66421dd9546af949a5d3e0a94, ASSIGN}] 2024-12-03T23:29:58,372 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=153, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=0f2277f66421dd9546af949a5d3e0a94, ASSIGN 2024-12-03T23:29:58,373 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=153, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=0f2277f66421dd9546af949a5d3e0a94, ASSIGN; state=MERGED, location=ed8014902436,39085,1733268370401; forceNewPlan=false, retain=false 2024-12-03T23:29:58,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-12-03T23:29:58,523 INFO [ed8014902436:43753 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-03T23:29:58,524 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=153 updating hbase:meta row=0f2277f66421dd9546af949a5d3e0a94, regionState=OPENING, regionLocation=ed8014902436,39085,1733268370401 2024-12-03T23:29:58,526 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=153, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=0f2277f66421dd9546af949a5d3e0a94, ASSIGN because future has completed 2024-12-03T23:29:58,526 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=154, ppid=153, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0f2277f66421dd9546af949a5d3e0a94, server=ed8014902436,39085,1733268370401}] 2024-12-03T23:29:58,680 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,,1733268597414.0f2277f66421dd9546af949a5d3e0a94. 
2024-12-03T23:29:58,681 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(7752): Opening region: {ENCODED => 0f2277f66421dd9546af949a5d3e0a94, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733268597414.0f2277f66421dd9546af949a5d3e0a94.', STARTKEY => '', ENDKEY => ''} 2024-12-03T23:29:58,681 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,,1733268597414.0f2277f66421dd9546af949a5d3e0a94. service=AccessControlService 2024-12-03T23:29:58,681 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-03T23:29:58,681 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 0f2277f66421dd9546af949a5d3e0a94 2024-12-03T23:29:58,681 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1733268597414.0f2277f66421dd9546af949a5d3e0a94.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T23:29:58,682 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(7794): checking encryption for 0f2277f66421dd9546af949a5d3e0a94 2024-12-03T23:29:58,682 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(7797): checking classloading for 0f2277f66421dd9546af949a5d3e0a94 2024-12-03T23:29:58,683 INFO [StoreOpener-0f2277f66421dd9546af949a5d3e0a94-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 0f2277f66421dd9546af949a5d3e0a94 2024-12-03T23:29:58,683 INFO [StoreOpener-0f2277f66421dd9546af949a5d3e0a94-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0f2277f66421dd9546af949a5d3e0a94 columnFamilyName cf 2024-12-03T23:29:58,683 DEBUG [StoreOpener-0f2277f66421dd9546af949a5d3e0a94-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:29:58,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 
2024-12-03T23:29:58,691 DEBUG [StoreOpener-0f2277f66421dd9546af949a5d3e0a94-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0f2277f66421dd9546af949a5d3e0a94/cf/1b3924a59f314fb68bc589a1cb8e003c.8264560b9123327c4d17ce9a7a623e39->hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8264560b9123327c4d17ce9a7a623e39/cf/1b3924a59f314fb68bc589a1cb8e003c-top 2024-12-03T23:29:58,696 DEBUG [StoreOpener-0f2277f66421dd9546af949a5d3e0a94-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0f2277f66421dd9546af949a5d3e0a94/cf/c58bc2dcd78c4f04a375f5d691963020.d1b9eb3a4cc2f57ef7032cb8ee42dc1b->hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d1b9eb3a4cc2f57ef7032cb8ee42dc1b/cf/c58bc2dcd78c4f04a375f5d691963020-top 2024-12-03T23:29:58,696 INFO [StoreOpener-0f2277f66421dd9546af949a5d3e0a94-1 {}] regionserver.HStore(327): Store=0f2277f66421dd9546af949a5d3e0a94/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T23:29:58,696 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1038): replaying wal for 0f2277f66421dd9546af949a5d3e0a94 2024-12-03T23:29:58,697 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0f2277f66421dd9546af949a5d3e0a94 2024-12-03T23:29:58,698 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0f2277f66421dd9546af949a5d3e0a94 2024-12-03T23:29:58,699 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1048): stopping wal replay for 0f2277f66421dd9546af949a5d3e0a94 2024-12-03T23:29:58,699 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1060): Cleaning up temporary data for 0f2277f66421dd9546af949a5d3e0a94 2024-12-03T23:29:58,700 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1093): writing seq id for 0f2277f66421dd9546af949a5d3e0a94 2024-12-03T23:29:58,701 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1114): Opened 0f2277f66421dd9546af949a5d3e0a94; next sequenceid=9; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69718354, jitterRate=0.03888443112373352}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-03T23:29:58,701 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] 
regionserver.HRegion(1122): Running coprocessor post-open hooks for 0f2277f66421dd9546af949a5d3e0a94 2024-12-03T23:29:58,701 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1006): Region open journal for 0f2277f66421dd9546af949a5d3e0a94: Running coprocessor pre-open hook at 1733268598682Writing region info on filesystem at 1733268598682Initializing all the Stores at 1733268598682Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733268598682Cleaning up temporary data from old regions at 1733268598699 (+17 ms)Running coprocessor post-open hooks at 1733268598701 (+2 ms)Region opened successfully at 1733268598701 2024-12-03T23:29:58,702 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,,1733268597414.0f2277f66421dd9546af949a5d3e0a94., pid=154, masterSystemTime=1733268598678 2024-12-03T23:29:58,702 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.CompactSplit(342): Ignoring compaction request for testtb-testExportFileSystemStateWithMergeRegion-1,,1733268597414.0f2277f66421dd9546af949a5d3e0a94.,because compaction is disabled. 2024-12-03T23:29:58,703 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,,1733268597414.0f2277f66421dd9546af949a5d3e0a94. 2024-12-03T23:29:58,703 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,,1733268597414.0f2277f66421dd9546af949a5d3e0a94. 
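
At this point pid=154 has opened the merged region 0f2277f66421dd9546af949a5d3e0a94 (openSeqNum=9, backed by the two "-top" reference files loaded above), so the merge is observable from the client side. The following is a minimal verification sketch, assuming the same table and the rows '1' and '2' written earlier in the test; the class name, printed messages, and structure are illustrative, not the test's own assertions.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class VerifyMergeSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table =
        TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin();
         Table t = conn.getTable(table)) {
      // After the merge the table should be served by a single region
      // (0f2277f6... in the log) covering the whole key space.
      int regionCount = admin.getRegions(table).size();
      System.out.println("regions after merge: " + regionCount);

      // The rows written before the merge ('1' and '2', column family 'cf')
      // remain readable through the merged region's reference files.
      for (String row : new String[] {"1", "2"}) {
        Result r = t.get(new Get(Bytes.toBytes(row)));
        System.out.println("row " + row + " present: " + !r.isEmpty());
      }
    }
  }
}
```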
2024-12-03T23:29:58,704 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=153 updating hbase:meta row=0f2277f66421dd9546af949a5d3e0a94, regionState=OPEN, openSeqNum=9, regionLocation=ed8014902436,39085,1733268370401 2024-12-03T23:29:58,705 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=154, ppid=153, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0f2277f66421dd9546af949a5d3e0a94, server=ed8014902436,39085,1733268370401 because future has completed 2024-12-03T23:29:58,708 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=154, resume processing ppid=153 2024-12-03T23:29:58,708 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=154, ppid=153, state=SUCCESS, hasLock=false; OpenRegionProcedure 0f2277f66421dd9546af949a5d3e0a94, server=ed8014902436,39085,1733268370401 in 180 msec 2024-12-03T23:29:58,709 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=153, resume processing ppid=148 2024-12-03T23:29:58,709 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=153, ppid=148, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=0f2277f66421dd9546af949a5d3e0a94, ASSIGN in 337 msec 2024-12-03T23:29:58,711 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=148, state=SUCCESS, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[d1b9eb3a4cc2f57ef7032cb8ee42dc1b, 8264560b9123327c4d17ce9a7a623e39], force=true in 649 msec 2024-12-03T23:29:58,717 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0006_000001 (auth:SIMPLE) from 127.0.0.1:49162 2024-12-03T23:29:58,728 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_1/usercache/jenkins/appcache/application_1733268377251_0006/container_1733268377251_0006_01_000001/launch_container.sh] 2024-12-03T23:29:58,728 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_1/usercache/jenkins/appcache/application_1733268377251_0006/container_1733268377251_0006_01_000001/container_tokens] 2024-12-03T23:29:58,728 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_1/usercache/jenkins/appcache/application_1733268377251_0006/container_1733268377251_0006_01_000001/sysfs] 2024-12-03T23:29:59,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-12-03T23:29:59,200 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: MERGE_REGIONS, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-12-03T23:29:59,201 INFO 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } 2024-12-03T23:29:59,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733268599201 (current time:1733268599201). 2024-12-03T23:29:59,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-03T23:29:59,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 VERSION not specified, setting to 2 2024-12-03T23:29:59,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-03T23:29:59,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@65df0e67, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:29:59,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.ClusterIdFetcher(90): Going to request ed8014902436,43753,-1 for getting cluster id 2024-12-03T23:29:59,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T23:29:59,202 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '365bf15c-9cbe-4838-a767-ac7274dcdbe4' 2024-12-03T23:29:59,202 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T23:29:59,203 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "365bf15c-9cbe-4838-a767-ac7274dcdbe4" 2024-12-03T23:29:59,203 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6b90fe64, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:29:59,203 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [ed8014902436,43753,-1] 2024-12-03T23:29:59,203 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T23:29:59,203 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:29:59,204 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39594, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T23:29:59,204 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] 
ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4d3867b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:29:59,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T23:29:59,205 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ed8014902436,37351,1733268370600, seqNum=-1] 2024-12-03T23:29:59,205 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T23:29:59,206 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60208, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T23:29:59,207 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753. 2024-12-03T23:29:59,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-03T23:29:59,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:29:59,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:29:59,207 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
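
The "Connection has been closed by ..." INFO and the "Call stack:" DEBUG above are close-tracing for a short-lived connection that snapshot validation opens while checking whether security (the hbase:acl table) is available; they are not an error. A rough sketch of that open-check-close pattern, under the assumption that the check amounts to a tableExists call (the helper name is hypothetical, not HBase source):

```java
// Minimal sketch (not the HBase implementation): open a connection, check for the
// hbase:acl table, and close again. Closing the try-with-resources connection is what
// the "Call stack:" DEBUG trace above records.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

final class SecurityCheckSketch {
  static boolean aclTableExists(Configuration conf) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      return admin.tableExists(TableName.valueOf("hbase:acl"));
    }
  }
}
```
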
2024-12-03T23:29:59,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@683a0bb6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:29:59,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.ClusterIdFetcher(90): Going to request ed8014902436,43753,-1 for getting cluster id 2024-12-03T23:29:59,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T23:29:59,209 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '365bf15c-9cbe-4838-a767-ac7274dcdbe4' 2024-12-03T23:29:59,209 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T23:29:59,209 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "365bf15c-9cbe-4838-a767-ac7274dcdbe4" 2024-12-03T23:29:59,209 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@373344df, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:29:59,209 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [ed8014902436,43753,-1] 2024-12-03T23:29:59,209 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T23:29:59,210 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:29:59,210 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39612, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T23:29:59,210 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@45d458ab, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:29:59,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T23:29:59,211 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ed8014902436,37351,1733268370600, seqNum=-1] 2024-12-03T23:29:59,212 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T23:29:59,212 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60224, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-03T23:29:59,214 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion-1', locateType=CURRENT is [region=hbase:acl,,1733268373530.94ece278d222af9bae70064026dc24ae., hostname=ed8014902436,37351,1733268370600, seqNum=2] 2024-12-03T23:29:59,215 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753. 2024-12-03T23:29:59,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor256.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-03T23:29:59,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:29:59,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:29:59,215 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
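
The snapshot request handled above ({ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }) is what a client issues through the Admin API; the SnapshotProcedure that carries it out (pid=155) starts just below. A minimal sketch, assuming the default FLUSH-type snapshot of an enabled table; the class name and connection setup are illustrative:

```java
// Illustrative client-side sketch of the snapshot request logged above.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class SnapshotRequestSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Blocks until the master-side snapshot procedure (pid=155 below) completes.
      admin.snapshot("snaptb0-testExportFileSystemStateWithMergeRegion-1",
          TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1"));
    }
  }
}
```
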
2024-12-03T23:29:59,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion-1], kv [jenkins: RWXCA] 2024-12-03T23:29:59,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-03T23:29:59,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure2.ProcedureExecutor(1139): Stored pid=155, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } 2024-12-03T23:29:59,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, snapshot procedure id = 155 2024-12-03T23:29:59,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=155 2024-12-03T23:29:59,218 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-03T23:29:59,219 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-03T23:29:59,221 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-03T23:29:59,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742202_1378 (size=216) 2024-12-03T23:29:59,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742202_1378 (size=216) 2024-12-03T23:29:59,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742202_1378 (size=216) 2024-12-03T23:29:59,229 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-03T23:29:59,229 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=156, ppid=155, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 
0f2277f66421dd9546af949a5d3e0a94}] 2024-12-03T23:29:59,230 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=156, ppid=155, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0f2277f66421dd9546af949a5d3e0a94 2024-12-03T23:29:59,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=155 2024-12-03T23:29:59,381 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39085 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=156 2024-12-03T23:29:59,382 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion-1,,1733268597414.0f2277f66421dd9546af949a5d3e0a94. 2024-12-03T23:29:59,382 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.HRegion(2603): Flush status journal for 0f2277f66421dd9546af949a5d3e0a94: 2024-12-03T23:29:59,382 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion-1,,1733268597414.0f2277f66421dd9546af949a5d3e0a94. for snaptb0-testExportFileSystemStateWithMergeRegion-1 completed. 2024-12-03T23:29:59,382 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733268597414.0f2277f66421dd9546af949a5d3e0a94.' region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-03T23:29:59,382 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-03T23:29:59,382 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0f2277f66421dd9546af949a5d3e0a94/cf/1b3924a59f314fb68bc589a1cb8e003c.8264560b9123327c4d17ce9a7a623e39->hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8264560b9123327c4d17ce9a7a623e39/cf/1b3924a59f314fb68bc589a1cb8e003c-top, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0f2277f66421dd9546af949a5d3e0a94/cf/c58bc2dcd78c4f04a375f5d691963020.d1b9eb3a4cc2f57ef7032cb8ee42dc1b->hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d1b9eb3a4cc2f57ef7032cb8ee42dc1b/cf/c58bc2dcd78c4f04a375f5d691963020-top] hfiles 2024-12-03T23:29:59,382 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(265): Adding reference for file (1/2): 
hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0f2277f66421dd9546af949a5d3e0a94/cf/1b3924a59f314fb68bc589a1cb8e003c.8264560b9123327c4d17ce9a7a623e39 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-03T23:29:59,383 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(265): Adding reference for file (2/2): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0f2277f66421dd9546af949a5d3e0a94/cf/c58bc2dcd78c4f04a375f5d691963020.d1b9eb3a4cc2f57ef7032cb8ee42dc1b for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-03T23:29:59,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742203_1379 (size=269) 2024-12-03T23:29:59,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742203_1379 (size=269) 2024-12-03T23:29:59,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742203_1379 (size=269) 2024-12-03T23:29:59,389 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion-1,,1733268597414.0f2277f66421dd9546af949a5d3e0a94. 2024-12-03T23:29:59,389 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=156 2024-12-03T23:29:59,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.HMaster(4169): Remote procedure done, pid=156 2024-12-03T23:29:59,389 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 on region 0f2277f66421dd9546af949a5d3e0a94 2024-12-03T23:29:59,390 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=156, ppid=155, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0f2277f66421dd9546af949a5d3e0a94 2024-12-03T23:29:59,392 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=156, resume processing ppid=155 2024-12-03T23:29:59,392 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-03T23:29:59,392 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=156, ppid=155, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 0f2277f66421dd9546af949a5d3e0a94 in 161 msec 2024-12-03T23:29:59,392 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 
} execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-03T23:29:59,393 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-03T23:29:59,393 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-03T23:29:59,393 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-03T23:29:59,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742204_1380 (size=670) 2024-12-03T23:29:59,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742204_1380 (size=670) 2024-12-03T23:29:59,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742204_1380 (size=670) 2024-12-03T23:29:59,406 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-03T23:29:59,412 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-03T23:29:59,412 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-03T23:29:59,413 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-03T23:29:59,413 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, snapshot procedure id = 155 2024-12-03T23:29:59,415 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=155, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 
table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } in 198 msec 2024-12-03T23:29:59,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=155 2024-12-03T23:29:59,530 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-12-03T23:29:59,530 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268599530 2024-12-03T23:29:59,531 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:37331, tgtDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268599530, rawTgtDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268599530, srcFsUri=hdfs://localhost:37331, srcDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b 2024-12-03T23:29:59,556 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:37331, inputRoot=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b 2024-12-03T23:29:59,556 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-919726820_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268599530, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268599530/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-03T23:29:59,557 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 
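
The export that follows is driven by the ExportSnapshot tool, which verifies the source snapshot, copies its manifest, and plans a MapReduce job to copy the referenced hfiles to the target filesystem. A minimal sketch of how it is typically invoked (equivalently via the `hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot -snapshot ... -copy-to ...` command line); the driver class is illustrative, while the snapshot name and target URI are taken from the log:

```java
// Hedged sketch of the export step the test is driving here.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // ExportSnapshot is a Hadoop Tool; ToolRunner parses the -snapshot / -copy-to options.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportFileSystemStateWithMergeRegion-1",
        "-copy-to", "hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268599530"
    });
    System.exit(rc);
  }
}
```
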
2024-12-03T23:29:59,561 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268599530/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-03T23:29:59,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742205_1381 (size=216) 2024-12-03T23:29:59,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742205_1381 (size=216) 2024-12-03T23:29:59,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742206_1382 (size=670) 2024-12-03T23:29:59,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742206_1382 (size=670) 2024-12-03T23:29:59,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742206_1382 (size=670) 2024-12-03T23:29:59,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742205_1381 (size=216) 2024-12-03T23:29:59,573 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:29:59,573 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:29:59,573 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:30:00,102 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-03T23:30:00,135 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion 2024-12-03T23:30:00,135 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion Metrics about Tables on a single HBase RegionServer 2024-12-03T23:30:00,136 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-03T23:30:00,136 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 Metrics about Tables on a single HBase RegionServer 2024-12-03T23:30:00,136 
DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports 2024-12-03T23:30:00,428 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/hadoop-16413567153109326195.jar 2024-12-03T23:30:00,428 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:30:00,429 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:30:00,485 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/hadoop-15159819342557098794.jar 2024-12-03T23:30:00,486 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:30:00,486 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:30:00,487 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:30:00,487 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:30:00,487 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:30:00,488 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:30:00,488 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-03T23:30:00,488 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-03T23:30:00,488 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-03T23:30:00,489 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-03T23:30:00,489 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-03T23:30:00,489 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-03T23:30:00,490 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-03T23:30:00,490 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-03T23:30:00,490 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-03T23:30:00,490 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-03T23:30:00,491 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-03T23:30:00,491 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-03T23:30:00,491 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 
2024-12-03T23:30:00,491 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-03T23:30:00,492 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-03T23:30:00,492 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-03T23:30:00,492 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-03T23:30:00,492 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-03T23:30:00,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742207_1383 (size=131440) 2024-12-03T23:30:00,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742207_1383 (size=131440) 2024-12-03T23:30:00,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742207_1383 (size=131440) 2024-12-03T23:30:00,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742208_1384 (size=4188619) 2024-12-03T23:30:00,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742208_1384 (size=4188619) 2024-12-03T23:30:00,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742208_1384 (size=4188619) 2024-12-03T23:30:00,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742209_1385 (size=6424741) 2024-12-03T23:30:00,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742209_1385 (size=6424741) 2024-12-03T23:30:00,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742209_1385 (size=6424741) 2024-12-03T23:30:00,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742210_1386 (size=1323991) 2024-12-03T23:30:00,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742210_1386 (size=1323991) 
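
The long run of "For class ..., using jar ..." DEBUG entries above is TableMapReduceUtil resolving which jars the MapReduce job must ship so that map tasks can load the HBase, ZooKeeper, protobuf, metrics, and tracing classes; the addStoredBlock lines that follow are those jars being written into HDFS for the job. A minimal sketch of the driver-side call that triggers this resolution; the job name is illustrative:

```java
// Sketch only: shipping HBase dependency jars with a MapReduce job, as logged above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class DependencyJarsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "export-snapshot-sketch");
    // Adds the jars containing the classes listed in the log to the job's
    // distributed cache so the map tasks can resolve them at runtime.
    TableMapReduceUtil.addDependencyJars(job);
  }
}
```
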
2024-12-03T23:30:00,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742210_1386 (size=1323991) 2024-12-03T23:30:00,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742211_1387 (size=903924) 2024-12-03T23:30:00,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742211_1387 (size=903924) 2024-12-03T23:30:00,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742211_1387 (size=903924) 2024-12-03T23:30:00,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742212_1388 (size=8360360) 2024-12-03T23:30:00,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742212_1388 (size=8360360) 2024-12-03T23:30:00,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742212_1388 (size=8360360) 2024-12-03T23:30:00,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742213_1389 (size=443172) 2024-12-03T23:30:00,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742213_1389 (size=443172) 2024-12-03T23:30:00,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742213_1389 (size=443172) 2024-12-03T23:30:00,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742214_1390 (size=1877034) 2024-12-03T23:30:00,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742214_1390 (size=1877034) 2024-12-03T23:30:00,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742214_1390 (size=1877034) 2024-12-03T23:30:00,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742215_1391 (size=77835) 2024-12-03T23:30:00,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742215_1391 (size=77835) 2024-12-03T23:30:00,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742215_1391 (size=77835) 2024-12-03T23:30:00,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742216_1392 (size=30949) 2024-12-03T23:30:00,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742216_1392 (size=30949) 2024-12-03T23:30:00,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742216_1392 (size=30949) 2024-12-03T23:30:00,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742217_1393 
(size=1597213) 2024-12-03T23:30:00,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742217_1393 (size=1597213) 2024-12-03T23:30:00,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742217_1393 (size=1597213) 2024-12-03T23:30:00,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742218_1394 (size=4695811) 2024-12-03T23:30:00,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742218_1394 (size=4695811) 2024-12-03T23:30:00,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742218_1394 (size=4695811) 2024-12-03T23:30:00,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742219_1395 (size=232957) 2024-12-03T23:30:00,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742219_1395 (size=232957) 2024-12-03T23:30:00,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742219_1395 (size=232957) 2024-12-03T23:30:00,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742220_1396 (size=127628) 2024-12-03T23:30:00,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742220_1396 (size=127628) 2024-12-03T23:30:00,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742220_1396 (size=127628) 2024-12-03T23:30:01,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742221_1397 (size=20406) 2024-12-03T23:30:01,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742221_1397 (size=20406) 2024-12-03T23:30:01,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742221_1397 (size=20406) 2024-12-03T23:30:01,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742222_1398 (size=5175431) 2024-12-03T23:30:01,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742222_1398 (size=5175431) 2024-12-03T23:30:01,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742222_1398 (size=5175431) 2024-12-03T23:30:01,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742223_1399 (size=217634) 2024-12-03T23:30:01,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742223_1399 (size=217634) 2024-12-03T23:30:01,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to 
blk_1073742223_1399 (size=217634) 2024-12-03T23:30:01,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742224_1400 (size=1832290) 2024-12-03T23:30:01,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742224_1400 (size=1832290) 2024-12-03T23:30:01,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742224_1400 (size=1832290) 2024-12-03T23:30:01,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742225_1401 (size=322274) 2024-12-03T23:30:01,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742225_1401 (size=322274) 2024-12-03T23:30:01,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742225_1401 (size=322274) 2024-12-03T23:30:01,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742226_1402 (size=503880) 2024-12-03T23:30:01,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742226_1402 (size=503880) 2024-12-03T23:30:01,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742226_1402 (size=503880) 2024-12-03T23:30:01,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742227_1403 (size=29229) 2024-12-03T23:30:01,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742227_1403 (size=29229) 2024-12-03T23:30:01,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742227_1403 (size=29229) 2024-12-03T23:30:01,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742228_1404 (size=24096) 2024-12-03T23:30:01,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742228_1404 (size=24096) 2024-12-03T23:30:01,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742228_1404 (size=24096) 2024-12-03T23:30:01,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742229_1405 (size=111872) 2024-12-03T23:30:01,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742229_1405 (size=111872) 2024-12-03T23:30:01,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742229_1405 (size=111872) 2024-12-03T23:30:01,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742230_1406 (size=45609) 2024-12-03T23:30:01,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added 
to blk_1073742230_1406 (size=45609) 2024-12-03T23:30:01,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742230_1406 (size=45609) 2024-12-03T23:30:01,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742231_1407 (size=136454) 2024-12-03T23:30:01,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742231_1407 (size=136454) 2024-12-03T23:30:01,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742231_1407 (size=136454) 2024-12-03T23:30:01,101 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-03T23:30:01,103 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportFileSystemStateWithMergeRegion-1' hfile list 2024-12-03T23:30:01,105 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=4.8 K 2024-12-03T23:30:01,105 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=4.8 K 2024-12-03T23:30:01,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742232_1408 (size=481) 2024-12-03T23:30:01,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742232_1408 (size=481) 2024-12-03T23:30:01,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742232_1408 (size=481) 2024-12-03T23:30:01,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742233_1409 (size=21) 2024-12-03T23:30:01,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742233_1409 (size=21) 2024-12-03T23:30:01,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742233_1409 (size=21) 2024-12-03T23:30:01,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742234_1410 (size=304057) 2024-12-03T23:30:01,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742234_1410 (size=304057) 2024-12-03T23:30:01,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742234_1410 (size=304057) 2024-12-03T23:30:01,167 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-03T23:30:01,167 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-03T23:30:01,413 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0007_000001 (auth:SIMPLE) from 127.0.0.1:43194 2024-12-03T23:30:05,638 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-03T23:30:06,787 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0007_000001 (auth:SIMPLE) from 127.0.0.1:45938 2024-12-03T23:30:07,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742235_1411 (size=349755) 2024-12-03T23:30:07,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742235_1411 (size=349755) 2024-12-03T23:30:07,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742235_1411 (size=349755) 2024-12-03T23:30:08,449 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-03T23:30:09,060 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0007_000001 (auth:SIMPLE) from 127.0.0.1:54820 2024-12-03T23:30:09,061 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0007_000001 (auth:SIMPLE) from 127.0.0.1:48554 2024-12-03T23:30:12,475 WARN [regionserver/ed8014902436:0.Chore.1 {}] hbase.ExecutorStatusChore(69): RS_COMPACTED_FILES_DISCHARGER's size info, queued: 3, running: 1 2024-12-03T23:30:13,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742236_1412 (size=4945) 2024-12-03T23:30:13,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742236_1412 (size=4945) 2024-12-03T23:30:13,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742236_1412 (size=4945) 2024-12-03T23:30:13,442 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_0/usercache/jenkins/appcache/application_1733268377251_0007/container_1733268377251_0007_01_000002/launch_container.sh] 2024-12-03T23:30:13,442 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_0/usercache/jenkins/appcache/application_1733268377251_0007/container_1733268377251_0007_01_000002/container_tokens] 2024-12-03T23:30:13,442 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_0/usercache/jenkins/appcache/application_1733268377251_0007/container_1733268377251_0007_01_000002/sysfs] 2024-12-03T23:30:14,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742238_1414 (size=4945) 2024-12-03T23:30:14,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742238_1414 (size=4945) 2024-12-03T23:30:14,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742238_1414 (size=4945) 2024-12-03T23:30:14,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742237_1413 (size=22246) 2024-12-03T23:30:14,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742237_1413 (size=22246) 2024-12-03T23:30:14,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742237_1413 (size=22246) 2024-12-03T23:30:14,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742239_1415 (size=482) 2024-12-03T23:30:14,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742239_1415 (size=482) 2024-12-03T23:30:14,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742239_1415 (size=482) 2024-12-03T23:30:14,552 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_3/usercache/jenkins/appcache/application_1733268377251_0007/container_1733268377251_0007_01_000003/launch_container.sh] 2024-12-03T23:30:14,552 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_3/usercache/jenkins/appcache/application_1733268377251_0007/container_1733268377251_0007_01_000003/container_tokens] 2024-12-03T23:30:14,552 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_3/usercache/jenkins/appcache/application_1733268377251_0007/container_1733268377251_0007_01_000003/sysfs] 2024-12-03T23:30:14,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742240_1416 (size=22246) 2024-12-03T23:30:14,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742240_1416 (size=22246) 2024-12-03T23:30:14,581 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742240_1416 (size=22246) 2024-12-03T23:30:14,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742241_1417 (size=349755) 2024-12-03T23:30:14,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742241_1417 (size=349755) 2024-12-03T23:30:14,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742241_1417 (size=349755) 2024-12-03T23:30:14,643 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0007_000001 (auth:SIMPLE) from 127.0.0.1:52494 2024-12-03T23:30:16,586 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-03T23:30:16,587 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-12-03T23:30:16,620 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-03T23:30:16,620 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-03T23:30:16,621 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-03T23:30:16,621 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-919726820_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 at hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-03T23:30:16,622 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/.snapshotinfo 2024-12-03T23:30:16,622 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/data.manifest 2024-12-03T23:30:16,622 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-919726820_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268599530/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 at hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268599530/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-03T23:30:16,623 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268599530/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/.snapshotinfo 2024-12-03T23:30:16,623 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): 
hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268599530/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/data.manifest 2024-12-03T23:30:16,636 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-03T23:30:16,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure2.ProcedureExecutor(1139): Stored pid=157, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-03T23:30:16,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-12-03T23:30:16,642 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733268616642"}]},"ts":"1733268616642"} 2024-12-03T23:30:16,644 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=DISABLING in hbase:meta 2024-12-03T23:30:16,644 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithMergeRegion-1 to state=DISABLING 2024-12-03T23:30:16,646 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=158, ppid=157, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1}] 2024-12-03T23:30:16,648 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=159, ppid=158, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=0f2277f66421dd9546af949a5d3e0a94, UNASSIGN}] 2024-12-03T23:30:16,650 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=159, ppid=158, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=0f2277f66421dd9546af949a5d3e0a94, UNASSIGN 2024-12-03T23:30:16,654 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=159 updating hbase:meta row=0f2277f66421dd9546af949a5d3e0a94, regionState=CLOSING, regionLocation=ed8014902436,39085,1733268370401 2024-12-03T23:30:16,657 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=159, ppid=158, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=0f2277f66421dd9546af949a5d3e0a94, UNASSIGN because future has completed 2024-12-03T23:30:16,657 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-03T23:30:16,657 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=160, ppid=159, state=RUNNABLE, hasLock=false; CloseRegionProcedure 0f2277f66421dd9546af949a5d3e0a94, server=ed8014902436,39085,1733268370401}] 2024-12-03T23:30:16,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 
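The entries above trace the snapshot export end to end: the tool loads the hfile list for 'snaptb0-testExportFileSystemStateWithMergeRegion-1', plans two ~4.8 K splits, runs the copy through YARN containers, then finalizes and verifies the export before the test lists the resulting .snapshotinfo and data.manifest files. A minimal, hedged sketch of driving the same export programmatically follows; the --snapshot/--copy-to flag names are taken from the ExportSnapshot usage text and should be checked against the HBase version in use.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Snapshot name and destination root are the ones that appear in the log above.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "--snapshot", "snaptb0-testExportFileSystemStateWithMergeRegion-1",
        "--copy-to",
        "hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268599530"
    });
    System.exit(rc); // non-zero if the export or its verification step failed
  }
}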
2024-12-03T23:30:16,811 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(122): Close 0f2277f66421dd9546af949a5d3e0a94 2024-12-03T23:30:16,811 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-03T23:30:16,811 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1722): Closing 0f2277f66421dd9546af949a5d3e0a94, disabling compactions & flushes 2024-12-03T23:30:16,811 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1733268597414.0f2277f66421dd9546af949a5d3e0a94. 2024-12-03T23:30:16,812 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733268597414.0f2277f66421dd9546af949a5d3e0a94. 2024-12-03T23:30:16,812 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733268597414.0f2277f66421dd9546af949a5d3e0a94. after waiting 0 ms 2024-12-03T23:30:16,812 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1733268597414.0f2277f66421dd9546af949a5d3e0a94. 2024-12-03T23:30:16,816 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0f2277f66421dd9546af949a5d3e0a94/recovered.edits/12.seqid, newMaxSeqId=12, maxSeqId=8 2024-12-03T23:30:16,819 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-03T23:30:16,819 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1733268597414.0f2277f66421dd9546af949a5d3e0a94. 
2024-12-03T23:30:16,819 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1676): Region close journal for 0f2277f66421dd9546af949a5d3e0a94: Waiting for close lock at 1733268616811Running coprocessor pre-close hooks at 1733268616811Disabling compacts and flushes for region at 1733268616811Disabling writes for close at 1733268616812 (+1 ms)Writing region close event to WAL at 1733268616812Running coprocessor post-close hooks at 1733268616819 (+7 ms)Closed at 1733268616819 2024-12-03T23:30:16,823 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(157): Closed 0f2277f66421dd9546af949a5d3e0a94 2024-12-03T23:30:16,824 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=159 updating hbase:meta row=0f2277f66421dd9546af949a5d3e0a94, regionState=CLOSED 2024-12-03T23:30:16,827 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=160, ppid=159, state=RUNNABLE, hasLock=false; CloseRegionProcedure 0f2277f66421dd9546af949a5d3e0a94, server=ed8014902436,39085,1733268370401 because future has completed 2024-12-03T23:30:16,835 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=160, resume processing ppid=159 2024-12-03T23:30:16,835 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=160, ppid=159, state=SUCCESS, hasLock=false; CloseRegionProcedure 0f2277f66421dd9546af949a5d3e0a94, server=ed8014902436,39085,1733268370401 in 171 msec 2024-12-03T23:30:16,841 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=159, resume processing ppid=158 2024-12-03T23:30:16,841 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=159, ppid=158, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=0f2277f66421dd9546af949a5d3e0a94, UNASSIGN in 187 msec 2024-12-03T23:30:16,847 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=158, resume processing ppid=157 2024-12-03T23:30:16,847 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=158, ppid=157, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 198 msec 2024-12-03T23:30:16,848 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733268616848"}]},"ts":"1733268616848"} 2024-12-03T23:30:16,850 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=DISABLED in hbase:meta 2024-12-03T23:30:16,850 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithMergeRegion-1 to state=DISABLED 2024-12-03T23:30:16,858 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=157, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 219 msec 2024-12-03T23:30:16,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-12-03T23:30:16,960 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: 
default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-12-03T23:30:16,961 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-03T23:30:16,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure2.ProcedureExecutor(1139): Stored pid=161, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-03T23:30:16,964 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=161, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-03T23:30:16,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-03T23:30:16,965 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=161, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-03T23:30:16,969 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37351 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-03T23:30:16,970 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0f2277f66421dd9546af949a5d3e0a94 2024-12-03T23:30:16,973 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0f2277f66421dd9546af949a5d3e0a94/cf, FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0f2277f66421dd9546af949a5d3e0a94/recovered.edits] 2024-12-03T23:30:16,977 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d1b9eb3a4cc2f57ef7032cb8ee42dc1b 2024-12-03T23:30:16,977 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8264560b9123327c4d17ce9a7a623e39 2024-12-03T23:30:16,978 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d1b9eb3a4cc2f57ef7032cb8ee42dc1b/cf, FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d1b9eb3a4cc2f57ef7032cb8ee42dc1b/recovered.edits] 2024-12-03T23:30:16,979 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0f2277f66421dd9546af949a5d3e0a94/cf/1b3924a59f314fb68bc589a1cb8e003c.8264560b9123327c4d17ce9a7a623e39 to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0f2277f66421dd9546af949a5d3e0a94/cf/1b3924a59f314fb68bc589a1cb8e003c.8264560b9123327c4d17ce9a7a623e39 2024-12-03T23:30:16,981 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8264560b9123327c4d17ce9a7a623e39/cf, FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8264560b9123327c4d17ce9a7a623e39/recovered.edits] 2024-12-03T23:30:17,011 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0f2277f66421dd9546af949a5d3e0a94/cf/c58bc2dcd78c4f04a375f5d691963020.d1b9eb3a4cc2f57ef7032cb8ee42dc1b to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0f2277f66421dd9546af949a5d3e0a94/cf/c58bc2dcd78c4f04a375f5d691963020.d1b9eb3a4cc2f57ef7032cb8ee42dc1b 2024-12-03T23:30:17,013 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d1b9eb3a4cc2f57ef7032cb8ee42dc1b/cf/c58bc2dcd78c4f04a375f5d691963020 to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d1b9eb3a4cc2f57ef7032cb8ee42dc1b/cf/c58bc2dcd78c4f04a375f5d691963020 2024-12-03T23:30:17,015 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8264560b9123327c4d17ce9a7a623e39/cf/1b3924a59f314fb68bc589a1cb8e003c to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8264560b9123327c4d17ce9a7a623e39/cf/1b3924a59f314fb68bc589a1cb8e003c 2024-12-03T23:30:17,018 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0f2277f66421dd9546af949a5d3e0a94/recovered.edits/12.seqid to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0f2277f66421dd9546af949a5d3e0a94/recovered.edits/12.seqid 2024-12-03T23:30:17,026 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0f2277f66421dd9546af949a5d3e0a94 2024-12-03T23:30:17,027 DEBUG [HFileArchiver-17 
{}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d1b9eb3a4cc2f57ef7032cb8ee42dc1b/recovered.edits/8.seqid to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d1b9eb3a4cc2f57ef7032cb8ee42dc1b/recovered.edits/8.seqid 2024-12-03T23:30:17,028 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d1b9eb3a4cc2f57ef7032cb8ee42dc1b 2024-12-03T23:30:17,028 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8264560b9123327c4d17ce9a7a623e39/recovered.edits/8.seqid to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8264560b9123327c4d17ce9a7a623e39/recovered.edits/8.seqid 2024-12-03T23:30:17,029 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8264560b9123327c4d17ce9a7a623e39 2024-12-03T23:30:17,029 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithMergeRegion-1 regions 2024-12-03T23:30:17,034 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=161, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-03T23:30:17,037 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of testtb-testExportFileSystemStateWithMergeRegion-1 from hbase:meta 2024-12-03T23:30:17,071 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-03T23:30:17,071 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-03T23:30:17,071 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-03T23:30:17,071 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-03T23:30:17,072 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 
2024-12-03T23:30:17,072 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-03T23:30:17,072 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-03T23:30:17,072 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithMergeRegion-1' descriptor. 2024-12-03T23:30:17,074 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=161, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-03T23:30:17,074 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithMergeRegion-1' from region states. 2024-12-03T23:30:17,074 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-03T23:30:17,074 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1733268597414.0f2277f66421dd9546af949a5d3e0a94.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733268617074"}]},"ts":"9223372036854775807"} 2024-12-03T23:30:17,077 INFO [PEWorker-2 {}] assignment.RegionStateStore(562): Deleted 1 regions from META 2024-12-03T23:30:17,077 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 0f2277f66421dd9546af949a5d3e0a94, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733268597414.0f2277f66421dd9546af949a5d3e0a94.', STARTKEY => '', ENDKEY => ''}] 2024-12-03T23:30:17,077 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithMergeRegion-1' as deleted. 
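The disable/delete sequence above for testtb-testExportFileSystemStateWithMergeRegion-1 (DisableTableProcedure pid=157 with its region-close subprocedures, then DeleteTableProcedure pid=161 archiving the region directories and clearing hbase:meta and the ACL znode) is what the master runs in response to two client calls. A hedged client-side sketch using the standard Admin API:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");
      if (admin.isTableEnabled(table)) {
        admin.disableTable(table); // master schedules DisableTableProcedure and closes each region
      }
      admin.deleteTable(table);    // master schedules DeleteTableProcedure, archives HFiles, cleans hbase:meta
    }
  }
}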
2024-12-03T23:30:17,077 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733268617077"}]},"ts":"9223372036854775807"} 2024-12-03T23:30:17,079 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithMergeRegion-1 state from META 2024-12-03T23:30:17,080 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=161, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-03T23:30:17,082 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=161, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 119 msec 2024-12-03T23:30:17,095 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-03T23:30:17,095 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-03T23:30:17,095 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-03T23:30:17,095 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-03T23:30:17,095 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:30:17,095 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:30:17,095 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:30:17,095 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:30:17,096 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-03T23:30:17,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] 
master.MasterRpcServices(1377): Checking to see if procedure is done pid=161 2024-12-03T23:30:17,097 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-03T23:30:17,097 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-12-03T23:30:17,097 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-03T23:30:17,098 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithMergeRegion 2024-12-03T23:30:17,099 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-03T23:30:17,099 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-03T23:30:17,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure2.ProcedureExecutor(1139): Stored pid=162, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-03T23:30:17,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-12-03T23:30:17,102 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733268617102"}]},"ts":"1733268617102"} 2024-12-03T23:30:17,104 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=DISABLING in hbase:meta 2024-12-03T23:30:17,104 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithMergeRegion to state=DISABLING 2024-12-03T23:30:17,115 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=163, ppid=162, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion}] 2024-12-03T23:30:17,117 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=164, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=44fc121d055419da5089ab46382aee4c, UNASSIGN}, {pid=165, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=cb8fd3807df9534ef4d2d133dcd44fc7, UNASSIGN}] 2024-12-03T23:30:17,118 INFO [PEWorker-5 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=165, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=cb8fd3807df9534ef4d2d133dcd44fc7, UNASSIGN 2024-12-03T23:30:17,118 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=164, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=44fc121d055419da5089ab46382aee4c, UNASSIGN 2024-12-03T23:30:17,119 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=165 updating hbase:meta row=cb8fd3807df9534ef4d2d133dcd44fc7, regionState=CLOSING, regionLocation=ed8014902436,37351,1733268370600 2024-12-03T23:30:17,119 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=164 updating hbase:meta row=44fc121d055419da5089ab46382aee4c, regionState=CLOSING, regionLocation=ed8014902436,39085,1733268370401 2024-12-03T23:30:17,121 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=165, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=cb8fd3807df9534ef4d2d133dcd44fc7, UNASSIGN because future has completed 2024-12-03T23:30:17,121 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-03T23:30:17,121 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=166, ppid=165, state=RUNNABLE, hasLock=false; CloseRegionProcedure cb8fd3807df9534ef4d2d133dcd44fc7, server=ed8014902436,37351,1733268370600}] 2024-12-03T23:30:17,121 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=164, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=44fc121d055419da5089ab46382aee4c, UNASSIGN because future has completed 2024-12-03T23:30:17,122 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43753 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=CLOSING, location=ed8014902436,39085,1733268370401, table=testtb-testExportFileSystemStateWithMergeRegion, region=44fc121d055419da5089ab46382aee4c. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 
2024-12-03T23:30:17,123 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-03T23:30:17,123 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=167, ppid=164, state=RUNNABLE, hasLock=false; CloseRegionProcedure 44fc121d055419da5089ab46382aee4c, server=ed8014902436,39085,1733268370401}] 2024-12-03T23:30:17,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-12-03T23:30:17,275 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] handler.UnassignRegionHandler(122): Close cb8fd3807df9534ef4d2d133dcd44fc7 2024-12-03T23:30:17,275 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-03T23:30:17,275 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1722): Closing cb8fd3807df9534ef4d2d133dcd44fc7, disabling compactions & flushes 2024-12-03T23:30:17,275 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,1,1733268595235.cb8fd3807df9534ef4d2d133dcd44fc7. 2024-12-03T23:30:17,275 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1733268595235.cb8fd3807df9534ef4d2d133dcd44fc7. 2024-12-03T23:30:17,275 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1733268595235.cb8fd3807df9534ef4d2d133dcd44fc7. after waiting 0 ms 2024-12-03T23:30:17,275 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1733268595235.cb8fd3807df9534ef4d2d133dcd44fc7. 2024-12-03T23:30:17,281 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] handler.UnassignRegionHandler(122): Close 44fc121d055419da5089ab46382aee4c 2024-12-03T23:30:17,281 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-03T23:30:17,281 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1722): Closing 44fc121d055419da5089ab46382aee4c, disabling compactions & flushes 2024-12-03T23:30:17,281 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,,1733268595235.44fc121d055419da5089ab46382aee4c. 2024-12-03T23:30:17,281 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,,1733268595235.44fc121d055419da5089ab46382aee4c. 
2024-12-03T23:30:17,281 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,,1733268595235.44fc121d055419da5089ab46382aee4c. after waiting 0 ms 2024-12-03T23:30:17,281 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,,1733268595235.44fc121d055419da5089ab46382aee4c. 2024-12-03T23:30:17,302 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion/cb8fd3807df9534ef4d2d133dcd44fc7/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-03T23:30:17,303 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-03T23:30:17,303 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1733268595235.cb8fd3807df9534ef4d2d133dcd44fc7. 2024-12-03T23:30:17,303 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1676): Region close journal for cb8fd3807df9534ef4d2d133dcd44fc7: Waiting for close lock at 1733268617275Running coprocessor pre-close hooks at 1733268617275Disabling compacts and flushes for region at 1733268617275Disabling writes for close at 1733268617275Writing region close event to WAL at 1733268617287 (+12 ms)Running coprocessor post-close hooks at 1733268617303 (+16 ms)Closed at 1733268617303 2024-12-03T23:30:17,306 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=165 updating hbase:meta row=cb8fd3807df9534ef4d2d133dcd44fc7, regionState=CLOSED 2024-12-03T23:30:17,308 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] handler.UnassignRegionHandler(157): Closed cb8fd3807df9534ef4d2d133dcd44fc7 2024-12-03T23:30:17,309 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=166, ppid=165, state=RUNNABLE, hasLock=false; CloseRegionProcedure cb8fd3807df9534ef4d2d133dcd44fc7, server=ed8014902436,37351,1733268370600 because future has completed 2024-12-03T23:30:17,314 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=166, resume processing ppid=165 2024-12-03T23:30:17,314 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=166, ppid=165, state=SUCCESS, hasLock=false; CloseRegionProcedure cb8fd3807df9534ef4d2d133dcd44fc7, server=ed8014902436,37351,1733268370600 in 190 msec 2024-12-03T23:30:17,316 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=165, ppid=163, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=cb8fd3807df9534ef4d2d133dcd44fc7, UNASSIGN in 197 msec 2024-12-03T23:30:17,331 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion/44fc121d055419da5089ab46382aee4c/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-03T23:30:17,332 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-03T23:30:17,332 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,,1733268595235.44fc121d055419da5089ab46382aee4c. 2024-12-03T23:30:17,332 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1676): Region close journal for 44fc121d055419da5089ab46382aee4c: Waiting for close lock at 1733268617281Running coprocessor pre-close hooks at 1733268617281Disabling compacts and flushes for region at 1733268617281Disabling writes for close at 1733268617281Writing region close event to WAL at 1733268617316 (+35 ms)Running coprocessor post-close hooks at 1733268617332 (+16 ms)Closed at 1733268617332 2024-12-03T23:30:17,336 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] handler.UnassignRegionHandler(157): Closed 44fc121d055419da5089ab46382aee4c 2024-12-03T23:30:17,338 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=164 updating hbase:meta row=44fc121d055419da5089ab46382aee4c, regionState=CLOSED 2024-12-03T23:30:17,341 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=167, ppid=164, state=RUNNABLE, hasLock=false; CloseRegionProcedure 44fc121d055419da5089ab46382aee4c, server=ed8014902436,39085,1733268370401 because future has completed 2024-12-03T23:30:17,343 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=167, resume processing ppid=164 2024-12-03T23:30:17,344 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=167, ppid=164, state=SUCCESS, hasLock=false; CloseRegionProcedure 44fc121d055419da5089ab46382aee4c, server=ed8014902436,39085,1733268370401 in 219 msec 2024-12-03T23:30:17,345 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=164, resume processing ppid=163 2024-12-03T23:30:17,345 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=164, ppid=163, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=44fc121d055419da5089ab46382aee4c, UNASSIGN in 226 msec 2024-12-03T23:30:17,359 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=163, resume processing ppid=162 2024-12-03T23:30:17,359 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=163, ppid=162, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 240 msec 2024-12-03T23:30:17,361 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733268617361"}]},"ts":"1733268617361"} 2024-12-03T23:30:17,364 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=DISABLED in hbase:meta 
2024-12-03T23:30:17,364 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithMergeRegion to state=DISABLED 2024-12-03T23:30:17,369 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=162, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 268 msec 2024-12-03T23:30:17,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-12-03T23:30:17,420 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-12-03T23:30:17,421 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithMergeRegion 2024-12-03T23:30:17,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure2.ProcedureExecutor(1139): Stored pid=168, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-03T23:30:17,424 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=168, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-03T23:30:17,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithMergeRegion 2024-12-03T23:30:17,438 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=168, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-03T23:30:17,439 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37351 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithMergeRegion 2024-12-03T23:30:17,454 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion/44fc121d055419da5089ab46382aee4c 2024-12-03T23:30:17,459 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion/44fc121d055419da5089ab46382aee4c/cf, FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion/44fc121d055419da5089ab46382aee4c/recovered.edits] 2024-12-03T23:30:17,462 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion/cb8fd3807df9534ef4d2d133dcd44fc7 2024-12-03T23:30:17,465 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion/cb8fd3807df9534ef4d2d133dcd44fc7/cf, FileablePath, 
hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion/cb8fd3807df9534ef4d2d133dcd44fc7/recovered.edits] 2024-12-03T23:30:17,474 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion/cb8fd3807df9534ef4d2d133dcd44fc7/cf/f7b0deb481ea488cbe6cfa42358d682a to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/cb8fd3807df9534ef4d2d133dcd44fc7/cf/f7b0deb481ea488cbe6cfa42358d682a 2024-12-03T23:30:17,474 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion/44fc121d055419da5089ab46382aee4c/cf/27c20189367a4626a09b4e96e547cf1a to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/44fc121d055419da5089ab46382aee4c/cf/27c20189367a4626a09b4e96e547cf1a 2024-12-03T23:30:17,482 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion/cb8fd3807df9534ef4d2d133dcd44fc7/recovered.edits/9.seqid to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/cb8fd3807df9534ef4d2d133dcd44fc7/recovered.edits/9.seqid 2024-12-03T23:30:17,563 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion/cb8fd3807df9534ef4d2d133dcd44fc7 2024-12-03T23:30:17,563 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion/44fc121d055419da5089ab46382aee4c/recovered.edits/9.seqid to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/44fc121d055419da5089ab46382aee4c/recovered.edits/9.seqid 2024-12-03T23:30:17,564 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithMergeRegion/44fc121d055419da5089ab46382aee4c 2024-12-03T23:30:17,564 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithMergeRegion regions 2024-12-03T23:30:17,565 DEBUG [PEWorker-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c 2024-12-03T23:30:17,566 DEBUG [PEWorker-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf] 
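The HFileArchiver entries above (and the mob-directory ones just below) do not delete store files outright; they move them under the cluster's archive root before removing the emptied region directories. A small sketch, assuming plain HDFS access to the paths named in the log, for inspecting what ends up in that archive:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

public class ListArchivedStoreFiles {
  public static void main(String[] args) throws Exception {
    // Archive root for the table being deleted, as it appears in the log above.
    Path archiveRoot = new Path(
        "hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b"
        + "/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion");
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create(archiveRoot.toString()), conf);
    // Recurse into the cf/ and recovered.edits/ subdirectories of each archived region.
    RemoteIterator<LocatedFileStatus> files = fs.listFiles(archiveRoot, true);
    while (files.hasNext()) {
      LocatedFileStatus status = files.next();
      System.out.println(status.getPath() + " (" + status.getLen() + " bytes)");
    }
  }
}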
2024-12-03T23:30:17,575 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/c4ca4238a0b923820dcc509a6f75849b202412031c48078c116840e680be56bb13a6d227_cb8fd3807df9534ef4d2d133dcd44fc7 to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/c4ca4238a0b923820dcc509a6f75849b202412031c48078c116840e680be56bb13a6d227_cb8fd3807df9534ef4d2d133dcd44fc7 2024-12-03T23:30:17,576 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/d41d8cd98f00b204e9800998ecf8427e20241203fb39ee1cf9cc49bfad2f8e1fdba375f8_44fc121d055419da5089ab46382aee4c to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/d41d8cd98f00b204e9800998ecf8427e20241203fb39ee1cf9cc49bfad2f8e1fdba375f8_44fc121d055419da5089ab46382aee4c 2024-12-03T23:30:17,577 DEBUG [PEWorker-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c 2024-12-03T23:30:17,580 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=168, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-03T23:30:17,583 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithMergeRegion from hbase:meta 2024-12-03T23:30:17,591 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-03T23:30:17,591 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-03T23:30:17,591 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-03T23:30:17,591 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-03T23:30:17,592 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-03T23:30:17,592 DEBUG [zk-permission-watcher-pool-0 {}] 
access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-03T23:30:17,592 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-03T23:30:17,593 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-03T23:30:17,594 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithMergeRegion' descriptor. 2024-12-03T23:30:17,598 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=168, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-03T23:30:17,598 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithMergeRegion' from region states. 2024-12-03T23:30:17,598 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion,,1733268595235.44fc121d055419da5089ab46382aee4c.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733268617598"}]},"ts":"9223372036854775807"} 2024-12-03T23:30:17,598 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion,1,1733268595235.cb8fd3807df9534ef4d2d133dcd44fc7.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733268617598"}]},"ts":"9223372036854775807"} 2024-12-03T23:30:17,601 INFO [PEWorker-4 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-03T23:30:17,601 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 44fc121d055419da5089ab46382aee4c, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1733268595235.44fc121d055419da5089ab46382aee4c.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => cb8fd3807df9534ef4d2d133dcd44fc7, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1733268595235.cb8fd3807df9534ef4d2d133dcd44fc7.', STARTKEY => '1', ENDKEY => ''}] 2024-12-03T23:30:17,601 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithMergeRegion' as deleted. 
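The DISABLE and DELETE procedures traced above (pid=162 and pid=168), together with the snapshot deletions that follow, correspond to ordinary HBase Admin calls on the client side. The sketch below is not the test's own teardown code; it is a hedged reconstruction using the public Admin API, with the table and snapshot names taken from the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropMergeRegionTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
      if (admin.tableExists(table)) {
        if (admin.isTableEnabled(table)) {
          admin.disableTable(table);   // maps onto the DisableTableProcedure (pid=162) above
        }
        admin.deleteTable(table);      // maps onto the DeleteTableProcedure (pid=168) above
      }
      // The snapshot delete requests that appear later in the log map onto Admin#deleteSnapshot.
      admin.deleteSnapshot("emptySnaptb0-testExportFileSystemStateWithMergeRegion");
      admin.deleteSnapshot("snaptb0-testExportFileSystemStateWithMergeRegion");
      admin.deleteSnapshot("snaptb0-testExportFileSystemStateWithMergeRegion-1");
    }
  }
}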
2024-12-03T23:30:17,601 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733268617601"}]},"ts":"9223372036854775807"} 2024-12-03T23:30:17,603 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithMergeRegion state from META 2024-12-03T23:30:17,604 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=168, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-03T23:30:17,606 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=168, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 183 msec 2024-12-03T23:30:17,700 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-03T23:30:17,700 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-03T23:30:17,700 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:30:17,700 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:30:17,701 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-03T23:30:17,701 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:30:17,701 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-03T23:30:17,701 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:30:17,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=168 2024-12-03T23:30:17,702 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithMergeRegion 2024-12-03T23:30:17,702 INFO [RPCClient-NioEventLoopGroup-6-4 {}] 
client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-12-03T23:30:17,712 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithMergeRegion" type: DISABLED 2024-12-03T23:30:17,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-03T23:30:17,717 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithMergeRegion" type: DISABLED 2024-12-03T23:30:17,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-03T23:30:17,721 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithMergeRegion-1" type: DISABLED 2024-12-03T23:30:17,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-03T23:30:17,757 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithMergeRegion Thread=816 (was 812) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-919726820_22 at /127.0.0.1:56166 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-919726820_22 at /127.0.0.1:43402 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 149165) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-18 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-19 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-919726820_22 at /127.0.0.1:56112 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (790252428) connection to localhost/127.0.0.1:41921 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HFileArchiver-17 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-284623850_1 at /127.0.0.1:43380 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) 
java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41921 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-16 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-20 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-6415 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) - Thread LEAK? -, OpenFileDescriptor=816 (was 803) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=740 (was 797), ProcessCount=23 (was 20) - ProcessCount LEAK? -, AvailableMemoryMB=1915 (was 2382) 2024-12-03T23:30:17,757 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=816 is superior to 500 2024-12-03T23:30:17,784 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportExpiredSnapshot Thread=816, OpenFileDescriptor=816, MaxFileDescriptor=1048576, SystemLoadAverage=740, ProcessCount=23, AvailableMemoryMB=1912 2024-12-03T23:30:17,784 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=816 is superior to 500 2024-12-03T23:30:17,787 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-03T23:30:17,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure2.ProcedureExecutor(1139): Stored pid=169, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-03T23:30:17,793 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_PRE_OPERATION 2024-12-03T23:30:17,794 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportExpiredSnapshot" procId is: 169 2024-12-03T23:30:17,795 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute 
state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-03T23:30:17,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=169 2024-12-03T23:30:17,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742242_1418 (size=443) 2024-12-03T23:30:17,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742242_1418 (size=443) 2024-12-03T23:30:17,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742242_1418 (size=443) 2024-12-03T23:30:17,826 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => b64cb6d11302c793c7b718a6ff0bad5d, NAME => 'testtb-testExportExpiredSnapshot,,1733268617786.b64cb6d11302c793c7b718a6ff0bad5d.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b 2024-12-03T23:30:17,826 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1fbf27fd8fbbc762a742db3946a4b62e, NAME => 'testtb-testExportExpiredSnapshot,1,1733268617786.1fbf27fd8fbbc762a742db3946a4b62e.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b 2024-12-03T23:30:17,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742244_1420 (size=68) 2024-12-03T23:30:17,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742244_1420 (size=68) 2024-12-03T23:30:17,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742244_1420 (size=68) 2024-12-03T23:30:17,873 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,,1733268617786.b64cb6d11302c793c7b718a6ff0bad5d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T23:30:17,873 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1722): Closing 
b64cb6d11302c793c7b718a6ff0bad5d, disabling compactions & flushes 2024-12-03T23:30:17,873 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,,1733268617786.b64cb6d11302c793c7b718a6ff0bad5d. 2024-12-03T23:30:17,873 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,,1733268617786.b64cb6d11302c793c7b718a6ff0bad5d. 2024-12-03T23:30:17,873 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,,1733268617786.b64cb6d11302c793c7b718a6ff0bad5d. after waiting 0 ms 2024-12-03T23:30:17,873 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,,1733268617786.b64cb6d11302c793c7b718a6ff0bad5d. 2024-12-03T23:30:17,873 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,,1733268617786.b64cb6d11302c793c7b718a6ff0bad5d. 2024-12-03T23:30:17,874 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1676): Region close journal for b64cb6d11302c793c7b718a6ff0bad5d: Waiting for close lock at 1733268617873Disabling compacts and flushes for region at 1733268617873Disabling writes for close at 1733268617873Writing region close event to WAL at 1733268617873Closed at 1733268617873 2024-12-03T23:30:17,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742243_1419 (size=68) 2024-12-03T23:30:17,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742243_1419 (size=68) 2024-12-03T23:30:17,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742243_1419 (size=68) 2024-12-03T23:30:17,879 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,1,1733268617786.1fbf27fd8fbbc762a742db3946a4b62e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T23:30:17,879 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1722): Closing 1fbf27fd8fbbc762a742db3946a4b62e, disabling compactions & flushes 2024-12-03T23:30:17,879 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,1,1733268617786.1fbf27fd8fbbc762a742db3946a4b62e. 2024-12-03T23:30:17,879 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,1,1733268617786.1fbf27fd8fbbc762a742db3946a4b62e. 2024-12-03T23:30:17,879 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,1,1733268617786.1fbf27fd8fbbc762a742db3946a4b62e. 
after waiting 0 ms 2024-12-03T23:30:17,879 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,1,1733268617786.1fbf27fd8fbbc762a742db3946a4b62e. 2024-12-03T23:30:17,879 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,1,1733268617786.1fbf27fd8fbbc762a742db3946a4b62e. 2024-12-03T23:30:17,879 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1676): Region close journal for 1fbf27fd8fbbc762a742db3946a4b62e: Waiting for close lock at 1733268617879Disabling compacts and flushes for region at 1733268617879Disabling writes for close at 1733268617879Writing region close event to WAL at 1733268617879Closed at 1733268617879 2024-12-03T23:30:17,881 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_ADD_TO_META 2024-12-03T23:30:17,881 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportExpiredSnapshot,,1733268617786.b64cb6d11302c793c7b718a6ff0bad5d.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1733268617881"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733268617881"}]},"ts":"1733268617881"} 2024-12-03T23:30:17,881 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportExpiredSnapshot,1,1733268617786.1fbf27fd8fbbc762a742db3946a4b62e.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1733268617881"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733268617881"}]},"ts":"1733268617881"} 2024-12-03T23:30:17,883 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
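The CreateTableProcedure above writes two regions for testtb-testExportExpiredSnapshot with a MOB-enabled 'cf' family (IS_MOB => 'true', MOB_THRESHOLD => '0', VERSIONS => '1') and a single split at row '1'. As a hedged sketch only, the equivalent client-side request could be built with the descriptor builders in the public API; the actual test constructs its tables through HBaseTestingUtil helpers, so the code below is illustrative, not the test source.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateExpiredSnapshotTestTable {
  public static void main(String[] args) throws Exception {
    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = connection.getAdmin()) {
      ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
          .setMobEnabled(true)   // IS_MOB => 'true' in the logged descriptor
          .setMobThreshold(0L)   // MOB_THRESHOLD => '0': every cell is written as a MOB file
          .setMaxVersions(1)     // VERSIONS => '1'
          .build();
      TableDescriptor table = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testtb-testExportExpiredSnapshot"))
          .setColumnFamily(cf)
          .build();
      // One split key "1" yields the two regions ('' -> '1', '1' -> '') seen in the log.
      admin.createTable(table, new byte[][] { Bytes.toBytes("1") });
    }
  }
}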
2024-12-03T23:30:17,884 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-03T23:30:17,884 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733268617884"}]},"ts":"1733268617884"} 2024-12-03T23:30:17,886 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=ENABLING in hbase:meta 2024-12-03T23:30:17,886 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {ed8014902436=0} racks are {/default-rack=0} 2024-12-03T23:30:17,888 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-03T23:30:17,888 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-03T23:30:17,888 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-03T23:30:17,888 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-03T23:30:17,888 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-03T23:30:17,888 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-03T23:30:17,888 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-03T23:30:17,888 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-03T23:30:17,888 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-03T23:30:17,888 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-03T23:30:17,888 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=b64cb6d11302c793c7b718a6ff0bad5d, ASSIGN}, {pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=1fbf27fd8fbbc762a742db3946a4b62e, ASSIGN}] 2024-12-03T23:30:17,889 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=1fbf27fd8fbbc762a742db3946a4b62e, ASSIGN 2024-12-03T23:30:17,889 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=b64cb6d11302c793c7b718a6ff0bad5d, ASSIGN 2024-12-03T23:30:17,890 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=1fbf27fd8fbbc762a742db3946a4b62e, ASSIGN; state=OFFLINE, location=ed8014902436,41121,1733268370525; forceNewPlan=false, retain=false 2024-12-03T23:30:17,890 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=170, ppid=169, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=b64cb6d11302c793c7b718a6ff0bad5d, ASSIGN; state=OFFLINE, location=ed8014902436,37351,1733268370600; forceNewPlan=false, retain=false 2024-12-03T23:30:17,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=169 2024-12-03T23:30:18,041 INFO [ed8014902436:43753 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-03T23:30:18,041 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=170 updating hbase:meta row=b64cb6d11302c793c7b718a6ff0bad5d, regionState=OPENING, regionLocation=ed8014902436,37351,1733268370600 2024-12-03T23:30:18,041 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=171 updating hbase:meta row=1fbf27fd8fbbc762a742db3946a4b62e, regionState=OPENING, regionLocation=ed8014902436,41121,1733268370525 2024-12-03T23:30:18,042 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=1fbf27fd8fbbc762a742db3946a4b62e, ASSIGN because future has completed 2024-12-03T23:30:18,043 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=172, ppid=171, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1fbf27fd8fbbc762a742db3946a4b62e, server=ed8014902436,41121,1733268370525}] 2024-12-03T23:30:18,043 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=b64cb6d11302c793c7b718a6ff0bad5d, ASSIGN because future has completed 2024-12-03T23:30:18,044 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=173, ppid=170, state=RUNNABLE, hasLock=false; OpenRegionProcedure b64cb6d11302c793c7b718a6ff0bad5d, server=ed8014902436,37351,1733268370600}] 2024-12-03T23:30:18,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=169 2024-12-03T23:30:18,197 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] handler.AssignRegionHandler(132): Open testtb-testExportExpiredSnapshot,1,1733268617786.1fbf27fd8fbbc762a742db3946a4b62e. 2024-12-03T23:30:18,197 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(7752): Opening region: {ENCODED => 1fbf27fd8fbbc762a742db3946a4b62e, NAME => 'testtb-testExportExpiredSnapshot,1,1733268617786.1fbf27fd8fbbc762a742db3946a4b62e.', STARTKEY => '1', ENDKEY => ''} 2024-12-03T23:30:18,198 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportExpiredSnapshot,1,1733268617786.1fbf27fd8fbbc762a742db3946a4b62e. 
service=AccessControlService 2024-12-03T23:30:18,198 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-03T23:30:18,198 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportExpiredSnapshot 1fbf27fd8fbbc762a742db3946a4b62e 2024-12-03T23:30:18,198 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,1,1733268617786.1fbf27fd8fbbc762a742db3946a4b62e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T23:30:18,198 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(7794): checking encryption for 1fbf27fd8fbbc762a742db3946a4b62e 2024-12-03T23:30:18,198 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(7797): checking classloading for 1fbf27fd8fbbc762a742db3946a4b62e 2024-12-03T23:30:18,199 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] handler.AssignRegionHandler(132): Open testtb-testExportExpiredSnapshot,,1733268617786.b64cb6d11302c793c7b718a6ff0bad5d. 2024-12-03T23:30:18,199 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(7752): Opening region: {ENCODED => b64cb6d11302c793c7b718a6ff0bad5d, NAME => 'testtb-testExportExpiredSnapshot,,1733268617786.b64cb6d11302c793c7b718a6ff0bad5d.', STARTKEY => '', ENDKEY => '1'} 2024-12-03T23:30:18,200 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportExpiredSnapshot,,1733268617786.b64cb6d11302c793c7b718a6ff0bad5d. service=AccessControlService 2024-12-03T23:30:18,200 INFO [StoreOpener-1fbf27fd8fbbc762a742db3946a4b62e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 1fbf27fd8fbbc762a742db3946a4b62e 2024-12-03T23:30:18,200 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-03T23:30:18,200 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportExpiredSnapshot b64cb6d11302c793c7b718a6ff0bad5d 2024-12-03T23:30:18,200 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,,1733268617786.b64cb6d11302c793c7b718a6ff0bad5d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T23:30:18,200 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(7794): checking encryption for b64cb6d11302c793c7b718a6ff0bad5d 2024-12-03T23:30:18,200 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(7797): checking classloading for b64cb6d11302c793c7b718a6ff0bad5d 2024-12-03T23:30:18,201 INFO [StoreOpener-b64cb6d11302c793c7b718a6ff0bad5d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region b64cb6d11302c793c7b718a6ff0bad5d 2024-12-03T23:30:18,202 INFO [StoreOpener-1fbf27fd8fbbc762a742db3946a4b62e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1fbf27fd8fbbc762a742db3946a4b62e columnFamilyName cf 2024-12-03T23:30:18,203 INFO [StoreOpener-b64cb6d11302c793c7b718a6ff0bad5d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b64cb6d11302c793c7b718a6ff0bad5d columnFamilyName cf 2024-12-03T23:30:18,205 DEBUG [StoreOpener-b64cb6d11302c793c7b718a6ff0bad5d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:30:18,205 INFO [StoreOpener-b64cb6d11302c793c7b718a6ff0bad5d-1 {}] regionserver.HStore(327): Store=b64cb6d11302c793c7b718a6ff0bad5d/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T23:30:18,206 DEBUG 
[RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1038): replaying wal for b64cb6d11302c793c7b718a6ff0bad5d 2024-12-03T23:30:18,206 DEBUG [StoreOpener-1fbf27fd8fbbc762a742db3946a4b62e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:30:18,207 INFO [StoreOpener-1fbf27fd8fbbc762a742db3946a4b62e-1 {}] regionserver.HStore(327): Store=1fbf27fd8fbbc762a742db3946a4b62e/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T23:30:18,208 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportExpiredSnapshot/b64cb6d11302c793c7b718a6ff0bad5d 2024-12-03T23:30:18,208 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportExpiredSnapshot/b64cb6d11302c793c7b718a6ff0bad5d 2024-12-03T23:30:18,214 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1038): replaying wal for 1fbf27fd8fbbc762a742db3946a4b62e 2024-12-03T23:30:18,214 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1048): stopping wal replay for b64cb6d11302c793c7b718a6ff0bad5d 2024-12-03T23:30:18,214 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1060): Cleaning up temporary data for b64cb6d11302c793c7b718a6ff0bad5d 2024-12-03T23:30:18,215 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportExpiredSnapshot/1fbf27fd8fbbc762a742db3946a4b62e 2024-12-03T23:30:18,216 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportExpiredSnapshot/1fbf27fd8fbbc762a742db3946a4b62e 2024-12-03T23:30:18,216 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1048): stopping wal replay for 1fbf27fd8fbbc762a742db3946a4b62e 2024-12-03T23:30:18,216 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1060): Cleaning up temporary data for 1fbf27fd8fbbc762a742db3946a4b62e 2024-12-03T23:30:18,216 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1093): writing seq id for b64cb6d11302c793c7b718a6ff0bad5d 2024-12-03T23:30:18,222 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1093): writing seq id for 1fbf27fd8fbbc762a742db3946a4b62e 2024-12-03T23:30:18,233 DEBUG 
[RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportExpiredSnapshot/b64cb6d11302c793c7b718a6ff0bad5d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T23:30:18,234 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1114): Opened b64cb6d11302c793c7b718a6ff0bad5d; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62644865, jitterRate=-0.06651876866817474}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-03T23:30:18,234 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1122): Running coprocessor post-open hooks for b64cb6d11302c793c7b718a6ff0bad5d 2024-12-03T23:30:18,235 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1006): Region open journal for b64cb6d11302c793c7b718a6ff0bad5d: Running coprocessor pre-open hook at 1733268618200Writing region info on filesystem at 1733268618200Initializing all the Stores at 1733268618201 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733268618201Cleaning up temporary data from old regions at 1733268618214 (+13 ms)Running coprocessor post-open hooks at 1733268618234 (+20 ms)Region opened successfully at 1733268618235 (+1 ms) 2024-12-03T23:30:18,236 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportExpiredSnapshot,,1733268617786.b64cb6d11302c793c7b718a6ff0bad5d., pid=173, masterSystemTime=1733268618196 2024-12-03T23:30:18,239 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportExpiredSnapshot,,1733268617786.b64cb6d11302c793c7b718a6ff0bad5d. 2024-12-03T23:30:18,239 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] handler.AssignRegionHandler(153): Opened testtb-testExportExpiredSnapshot,,1733268617786.b64cb6d11302c793c7b718a6ff0bad5d. 
2024-12-03T23:30:18,242 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportExpiredSnapshot/1fbf27fd8fbbc762a742db3946a4b62e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T23:30:18,242 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=170 updating hbase:meta row=b64cb6d11302c793c7b718a6ff0bad5d, regionState=OPEN, openSeqNum=2, regionLocation=ed8014902436,37351,1733268370600 2024-12-03T23:30:18,242 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1114): Opened 1fbf27fd8fbbc762a742db3946a4b62e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75252991, jitterRate=0.12135694921016693}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-03T23:30:18,242 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1fbf27fd8fbbc762a742db3946a4b62e 2024-12-03T23:30:18,243 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1006): Region open journal for 1fbf27fd8fbbc762a742db3946a4b62e: Running coprocessor pre-open hook at 1733268618198Writing region info on filesystem at 1733268618198Initializing all the Stores at 1733268618199 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733268618199Cleaning up temporary data from old regions at 1733268618216 (+17 ms)Running coprocessor post-open hooks at 1733268618242 (+26 ms)Region opened successfully at 1733268618243 (+1 ms) 2024-12-03T23:30:18,244 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportExpiredSnapshot,1,1733268617786.1fbf27fd8fbbc762a742db3946a4b62e., pid=172, masterSystemTime=1733268618194 2024-12-03T23:30:18,245 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=173, ppid=170, state=RUNNABLE, hasLock=false; OpenRegionProcedure b64cb6d11302c793c7b718a6ff0bad5d, server=ed8014902436,37351,1733268370600 because future has completed 2024-12-03T23:30:18,246 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportExpiredSnapshot,1,1733268617786.1fbf27fd8fbbc762a742db3946a4b62e. 2024-12-03T23:30:18,247 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] handler.AssignRegionHandler(153): Opened testtb-testExportExpiredSnapshot,1,1733268617786.1fbf27fd8fbbc762a742db3946a4b62e. 
2024-12-03T23:30:18,247 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=171 updating hbase:meta row=1fbf27fd8fbbc762a742db3946a4b62e, regionState=OPEN, openSeqNum=2, regionLocation=ed8014902436,41121,1733268370525 2024-12-03T23:30:18,252 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=172, ppid=171, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1fbf27fd8fbbc762a742db3946a4b62e, server=ed8014902436,41121,1733268370525 because future has completed 2024-12-03T23:30:18,253 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=173, resume processing ppid=170 2024-12-03T23:30:18,253 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=173, ppid=170, state=SUCCESS, hasLock=false; OpenRegionProcedure b64cb6d11302c793c7b718a6ff0bad5d, server=ed8014902436,37351,1733268370600 in 203 msec 2024-12-03T23:30:18,256 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=172, resume processing ppid=171 2024-12-03T23:30:18,256 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=170, ppid=169, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=b64cb6d11302c793c7b718a6ff0bad5d, ASSIGN in 365 msec 2024-12-03T23:30:18,256 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=172, ppid=171, state=SUCCESS, hasLock=false; OpenRegionProcedure 1fbf27fd8fbbc762a742db3946a4b62e, server=ed8014902436,41121,1733268370525 in 210 msec 2024-12-03T23:30:18,258 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=171, resume processing ppid=169 2024-12-03T23:30:18,258 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=171, ppid=169, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=1fbf27fd8fbbc762a742db3946a4b62e, ASSIGN in 368 msec 2024-12-03T23:30:18,259 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-03T23:30:18,259 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733268618259"}]},"ts":"1733268618259"} 2024-12-03T23:30:18,260 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=ENABLED in hbase:meta 2024-12-03T23:30:18,261 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_POST_OPERATION 2024-12-03T23:30:18,261 DEBUG [PEWorker-3 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportExpiredSnapshot jenkins: RWXCA 2024-12-03T23:30:18,265 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37351 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-03T23:30:18,291 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:30:18,291 DEBUG 
[Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:30:18,291 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:30:18,291 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:30:18,304 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-03T23:30:18,305 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-03T23:30:18,305 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-03T23:30:18,307 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-03T23:30:18,308 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=169, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportExpiredSnapshot in 517 msec 2024-12-03T23:30:18,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=169 2024-12-03T23:30:18,420 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-12-03T23:30:18,420 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-03T23:30:18,422 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportExpiredSnapshot 2024-12-03T23:30:18,422 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportExpiredSnapshot,,1733268617786.b64cb6d11302c793c7b718a6ff0bad5d. 
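The "jenkins: RWXCA" ACL row written to hbase:acl above, and then fanned out to every server through the /hbase/acl ZooKeeper watchers and ZKPermissionWatcher cache updates, corresponds to a full-table grant. A hedged sketch of how such a grant is issued through the public client API (the test harness performs the equivalent internally; the class name here is illustrative):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.Permission;

    public class GrantAclSketch {
      public static void main(String[] args) throws Throwable {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
          // Grants READ/WRITE/EXEC/CREATE/ADMIN ("RWXCA") on the whole table to user "jenkins";
          // the master persists the entry in hbase:acl and ZKPermissionWatcher pushes it to
          // the region servers, as seen in the NodeChildrenChanged events above.
          AccessControlClient.grant(conn,
              TableName.valueOf("testtb-testExportExpiredSnapshot"),
              "jenkins", null, null,
              Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
              Permission.Action.CREATE, Permission.Action.ADMIN);
        }
      }
    }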
2024-12-03T23:30:18,423 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-03T23:30:18,424 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-03T23:30:18,429 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-03T23:30:18,435 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-03T23:30:18,437 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-03T23:30:18,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733268618437 (current time:1733268618437). 2024-12-03T23:30:18,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-03T23:30:18,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-12-03T23:30:18,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-03T23:30:18,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3bade0b3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:30:18,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.ClusterIdFetcher(90): Going to request ed8014902436,43753,-1 for getting cluster id 2024-12-03T23:30:18,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T23:30:18,439 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '365bf15c-9cbe-4838-a767-ac7274dcdbe4' 2024-12-03T23:30:18,439 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T23:30:18,439 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "365bf15c-9cbe-4838-a767-ac7274dcdbe4" 2024-12-03T23:30:18,439 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b2fb261, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, 
fallbackAllowed=true, bind address=null 2024-12-03T23:30:18,439 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [ed8014902436,43753,-1] 2024-12-03T23:30:18,439 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T23:30:18,439 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:30:18,440 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46070, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T23:30:18,441 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d79cfe, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:30:18,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T23:30:18,442 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ed8014902436,37351,1733268370600, seqNum=-1] 2024-12-03T23:30:18,442 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T23:30:18,443 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39324, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T23:30:18,444 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753. 
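The snapshot request logged above ({ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }) is the master-side view of a client snapshot call. A minimal client-side sketch, assuming the standard Admin API and illustrative boilerplate:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class TakeSnapshotSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // A FLUSH-type snapshot: the master runs a SnapshotProcedure that walks the
          // SNAPSHOT_PREPARE ... SNAPSHOT_COMPLETE_SNAPSHOT states seen later in this log,
          // flushing each online region and writing the snapshot manifest.
          admin.snapshot("emptySnaptb0-testExportExpiredSnapshot",
              TableName.valueOf("testtb-testExportExpiredSnapshot"),
              SnapshotType.FLUSH);
        }
      }
    }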
2024-12-03T23:30:18,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-03T23:30:18,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:30:18,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:30:18,444 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-03T23:30:18,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4f2e6075, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:30:18,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.ClusterIdFetcher(90): Going to request ed8014902436,43753,-1 for getting cluster id 2024-12-03T23:30:18,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T23:30:18,445 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '365bf15c-9cbe-4838-a767-ac7274dcdbe4' 2024-12-03T23:30:18,445 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T23:30:18,445 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "365bf15c-9cbe-4838-a767-ac7274dcdbe4" 2024-12-03T23:30:18,446 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6952605d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:30:18,446 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [ed8014902436,43753,-1] 2024-12-03T23:30:18,446 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T23:30:18,446 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:30:18,447 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46074, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T23:30:18,447 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@303a025b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:30:18,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T23:30:18,448 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ed8014902436,37351,1733268370600, seqNum=-1] 2024-12-03T23:30:18,449 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T23:30:18,450 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39334, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T23:30:18,451 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1733268373530.94ece278d222af9bae70064026dc24ae., hostname=ed8014902436,37351,1733268370600, seqNum=2] 2024-12-03T23:30:18,452 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753. 
2024-12-03T23:30:18,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor256.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-03T23:30:18,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:30:18,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:30:18,452 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-03T23:30:18,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-03T23:30:18,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
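The call stack above shows the master reading the table's ACLs (PermissionStorage.getTablePermissions via writeAclToSnapshotDescription) so they can be recorded in the snapshot description during validation. For reference only, the equivalent client-side read, assuming the public AccessControlClient API; this is not what the master executes internally, just an illustration of the same data:

    import java.util.List;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.UserPermission;

    public class ListAclSketch {
      public static void main(String[] args) throws Throwable {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
          // Reads the hbase:acl entries for the table; here it should include "jenkins: RWXCA".
          List<UserPermission> perms =
              AccessControlClient.getUserPermissions(conn, "testtb-testExportExpiredSnapshot");
          perms.forEach(System.out::println);
        }
      }
    }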
2024-12-03T23:30:18,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure2.ProcedureExecutor(1139): Stored pid=174, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-03T23:30:18,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 174 2024-12-03T23:30:18,455 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-03T23:30:18,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=174 2024-12-03T23:30:18,455 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-03T23:30:18,458 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-03T23:30:18,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742245_1421 (size=170) 2024-12-03T23:30:18,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742245_1421 (size=170) 2024-12-03T23:30:18,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742245_1421 (size=170) 2024-12-03T23:30:18,463 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-03T23:30:18,464 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=175, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b64cb6d11302c793c7b718a6ff0bad5d}, {pid=176, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1fbf27fd8fbbc762a742db3946a4b62e}] 2024-12-03T23:30:18,465 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=176, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1fbf27fd8fbbc762a742db3946a4b62e 2024-12-03T23:30:18,465 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=175, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b64cb6d11302c793c7b718a6ff0bad5d 2024-12-03T23:30:18,559 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=174 2024-12-03T23:30:18,616 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37351 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=175 2024-12-03T23:30:18,616 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41121 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=176 2024-12-03T23:30:18,616 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,1,1733268617786.1fbf27fd8fbbc762a742db3946a4b62e. 2024-12-03T23:30:18,616 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,,1733268617786.b64cb6d11302c793c7b718a6ff0bad5d. 2024-12-03T23:30:18,617 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.HRegion(2603): Flush status journal for 1fbf27fd8fbbc762a742db3946a4b62e: 2024-12-03T23:30:18,617 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.HRegion(2603): Flush status journal for b64cb6d11302c793c7b718a6ff0bad5d: 2024-12-03T23:30:18,617 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,,1733268617786.b64cb6d11302c793c7b718a6ff0bad5d. for emptySnaptb0-testExportExpiredSnapshot completed. 2024-12-03T23:30:18,617 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,1,1733268617786.1fbf27fd8fbbc762a742db3946a4b62e. for emptySnaptb0-testExportExpiredSnapshot completed. 2024-12-03T23:30:18,617 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,,1733268617786.b64cb6d11302c793c7b718a6ff0bad5d.' region-info for snapshot=emptySnaptb0-testExportExpiredSnapshot 2024-12-03T23:30:18,617 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,1,1733268617786.1fbf27fd8fbbc762a742db3946a4b62e.' 
region-info for snapshot=emptySnaptb0-testExportExpiredSnapshot 2024-12-03T23:30:18,617 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-03T23:30:18,617 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-03T23:30:18,617 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-03T23:30:18,617 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-03T23:30:18,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742246_1422 (size=71) 2024-12-03T23:30:18,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742246_1422 (size=71) 2024-12-03T23:30:18,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742247_1423 (size=71) 2024-12-03T23:30:18,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742247_1423 (size=71) 2024-12-03T23:30:18,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742246_1422 (size=71) 2024-12-03T23:30:18,623 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,,1733268617786.b64cb6d11302c793c7b718a6ff0bad5d. 2024-12-03T23:30:18,623 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=175 2024-12-03T23:30:18,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.HMaster(4169): Remote procedure done, pid=175 2024-12-03T23:30:18,624 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportExpiredSnapshot on region b64cb6d11302c793c7b718a6ff0bad5d 2024-12-03T23:30:18,624 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=175, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b64cb6d11302c793c7b718a6ff0bad5d 2024-12-03T23:30:18,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742247_1423 (size=71) 2024-12-03T23:30:18,625 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,1,1733268617786.1fbf27fd8fbbc762a742db3946a4b62e. 
2024-12-03T23:30:18,625 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=176 2024-12-03T23:30:18,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.HMaster(4169): Remote procedure done, pid=176 2024-12-03T23:30:18,625 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportExpiredSnapshot on region 1fbf27fd8fbbc762a742db3946a4b62e 2024-12-03T23:30:18,625 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=176, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1fbf27fd8fbbc762a742db3946a4b62e 2024-12-03T23:30:18,626 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=175, ppid=174, state=SUCCESS, hasLock=false; SnapshotRegionProcedure b64cb6d11302c793c7b718a6ff0bad5d in 162 msec 2024-12-03T23:30:18,627 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=176, resume processing ppid=174 2024-12-03T23:30:18,627 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=176, ppid=174, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 1fbf27fd8fbbc762a742db3946a4b62e in 162 msec 2024-12-03T23:30:18,627 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-03T23:30:18,634 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-03T23:30:18,635 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-03T23:30:18,635 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-03T23:30:18,635 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:30:18,635 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-03T23:30:18,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742248_1424 (size=63) 2024-12-03T23:30:18,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742248_1424 (size=63) 2024-12-03T23:30:18,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742248_1424 (size=63) 2024-12-03T23:30:18,645 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-03T23:30:18,645 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportExpiredSnapshot 2024-12-03T23:30:18,646 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/.tmp/emptySnaptb0-testExportExpiredSnapshot 2024-12-03T23:30:18,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742249_1425 (size=653) 2024-12-03T23:30:18,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742249_1425 (size=653) 2024-12-03T23:30:18,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742249_1425 (size=653) 2024-12-03T23:30:18,658 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-03T23:30:18,662 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-03T23:30:18,662 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/.tmp/emptySnaptb0-testExportExpiredSnapshot to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/emptySnaptb0-testExportExpiredSnapshot 2024-12-03T23:30:18,663 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=174, 
state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-03T23:30:18,663 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 174 2024-12-03T23:30:18,664 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=174, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } in 210 msec 2024-12-03T23:30:18,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=174 2024-12-03T23:30:18,770 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-12-03T23:30:18,774 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37351 {}] regionserver.HRegion(8528): writing data to region testtb-testExportExpiredSnapshot,,1733268617786.b64cb6d11302c793c7b718a6ff0bad5d. with WAL disabled. Data may be lost in the event of a crash. 2024-12-03T23:30:18,775 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41121 {}] regionserver.HRegion(8528): writing data to region testtb-testExportExpiredSnapshot,1,1733268617786.1fbf27fd8fbbc762a742db3946a4b62e. with WAL disabled. Data may be lost in the event of a crash. 2024-12-03T23:30:18,776 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-03T23:30:18,778 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportExpiredSnapshot 2024-12-03T23:30:18,778 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportExpiredSnapshot,,1733268617786.b64cb6d11302c793c7b718a6ff0bad5d. 
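The two "writing data to region ... with WAL disabled" messages above come from the test loading rows with the write-ahead log skipped for speed. A hedged sketch of a client write that produces exactly that message; the row key and value are illustrative, not the test's actual data, while the family 'cf' and qualifier 'q' match the keys flushed later in this log:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SkipWalPutSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("testtb-testExportExpiredSnapshot"))) {
          Put put = new Put(Bytes.toBytes("row-0"))   // illustrative row key
              .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
          // SKIP_WAL trades durability for speed; the region logs the
          // "Data may be lost in the event of a crash" message seen above.
          put.setDurability(Durability.SKIP_WAL);
          table.put(put);
        }
      }
    }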
2024-12-03T23:30:18,778 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-03T23:30:18,780 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-03T23:30:18,783 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-03T23:30:18,788 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-03T23:30:18,790 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-03T23:30:18,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733268618790 (current time:1733268618790). 2024-12-03T23:30:18,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-03T23:30:18,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-12-03T23:30:18,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-03T23:30:18,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@301b8066, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:30:18,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.ClusterIdFetcher(90): Going to request ed8014902436,43753,-1 for getting cluster id 2024-12-03T23:30:18,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T23:30:18,791 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '365bf15c-9cbe-4838-a767-ac7274dcdbe4' 2024-12-03T23:30:18,791 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T23:30:18,791 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "365bf15c-9cbe-4838-a767-ac7274dcdbe4" 2024-12-03T23:30:18,791 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1eb8e8d2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, 
bind address=null 2024-12-03T23:30:18,791 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [ed8014902436,43753,-1] 2024-12-03T23:30:18,792 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T23:30:18,792 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:30:18,792 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46084, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T23:30:18,793 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@45550139, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:30:18,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T23:30:18,794 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ed8014902436,37351,1733268370600, seqNum=-1] 2024-12-03T23:30:18,794 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T23:30:18,795 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39346, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T23:30:18,796 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753. 
2024-12-03T23:30:18,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-03T23:30:18,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:30:18,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:30:18,796 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-03T23:30:18,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@20f5f8fa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:30:18,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.ClusterIdFetcher(90): Going to request ed8014902436,43753,-1 for getting cluster id 2024-12-03T23:30:18,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T23:30:18,797 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '365bf15c-9cbe-4838-a767-ac7274dcdbe4' 2024-12-03T23:30:18,798 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T23:30:18,798 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "365bf15c-9cbe-4838-a767-ac7274dcdbe4" 2024-12-03T23:30:18,798 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@68819a7b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:30:18,798 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [ed8014902436,43753,-1] 2024-12-03T23:30:18,798 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T23:30:18,798 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:30:18,799 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46110, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T23:30:18,799 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@544f4e6f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:30:18,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T23:30:18,801 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ed8014902436,37351,1733268370600, seqNum=-1] 2024-12-03T23:30:18,801 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T23:30:18,802 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39356, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T23:30:18,804 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1733268373530.94ece278d222af9bae70064026dc24ae., hostname=ed8014902436,37351,1733268370600, seqNum=2] 2024-12-03T23:30:18,805 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753. 
2024-12-03T23:30:18,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor256.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-03T23:30:18,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:30:18,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:30:18,805 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-03T23:30:18,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-03T23:30:18,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-12-03T23:30:18,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure2.ProcedureExecutor(1139): Stored pid=177, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-03T23:30:18,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 177 2024-12-03T23:30:18,807 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-03T23:30:18,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=177 2024-12-03T23:30:18,808 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-03T23:30:18,810 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-03T23:30:18,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742250_1426 (size=165) 2024-12-03T23:30:18,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742250_1426 (size=165) 2024-12-03T23:30:18,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742250_1426 (size=165) 2024-12-03T23:30:18,816 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-03T23:30:18,816 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=178, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b64cb6d11302c793c7b718a6ff0bad5d}, {pid=179, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1fbf27fd8fbbc762a742db3946a4b62e}] 2024-12-03T23:30:18,817 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=178, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b64cb6d11302c793c7b718a6ff0bad5d 2024-12-03T23:30:18,817 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=179, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1fbf27fd8fbbc762a742db3946a4b62e 2024-12-03T23:30:18,909 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=177 2024-12-03T23:30:18,968 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37351 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=178 2024-12-03T23:30:18,968 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41121 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=179 2024-12-03T23:30:18,968 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,1,1733268617786.1fbf27fd8fbbc762a742db3946a4b62e. 2024-12-03T23:30:18,968 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,,1733268617786.b64cb6d11302c793c7b718a6ff0bad5d. 2024-12-03T23:30:18,969 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HRegion(2902): Flushing 1fbf27fd8fbbc762a742db3946a4b62e 1/1 column families, dataSize=2.87 KB heapSize=6.44 KB 2024-12-03T23:30:18,969 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HRegion(2902): Flushing b64cb6d11302c793c7b718a6ff0bad5d 1/1 column families, dataSize=400 B heapSize=1.09 KB 2024-12-03T23:30:18,998 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241203cb38e45ec6884449b866d663b3746c30_1fbf27fd8fbbc762a742db3946a4b62e is 71, key is 1faa6a82ddf84af72b96af5e2f96563c/cf:q/1733268618775/Put/seqid=0 2024-12-03T23:30:19,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742251_1427 (size=7962) 2024-12-03T23:30:19,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742251_1427 (size=7962) 2024-12-03T23:30:19,005 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203cbe6813865c34cd78ee8d3b8550111d0_b64cb6d11302c793c7b718a6ff0bad5d is 71, key is 00d16fce13745fefe24073fc919b56cd/cf:q/1733268618774/Put/seqid=0 2024-12-03T23:30:19,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742251_1427 (size=7962) 2024-12-03T23:30:19,005 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:30:19,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to 
blk_1073742252_1428 (size=5311) 2024-12-03T23:30:19,009 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241203cb38e45ec6884449b866d663b3746c30_1fbf27fd8fbbc762a742db3946a4b62e to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/c4ca4238a0b923820dcc509a6f75849b20241203cb38e45ec6884449b866d663b3746c30_1fbf27fd8fbbc762a742db3946a4b62e 2024-12-03T23:30:19,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742252_1428 (size=5311) 2024-12-03T23:30:19,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742252_1428 (size=5311) 2024-12-03T23:30:19,010 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:30:19,011 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportExpiredSnapshot/1fbf27fd8fbbc762a742db3946a4b62e/.tmp/cf/ce845c92760343d7a3bf7b566b4c2b76, store: [table=testtb-testExportExpiredSnapshot family=cf region=1fbf27fd8fbbc762a742db3946a4b62e] 2024-12-03T23:30:19,011 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportExpiredSnapshot/1fbf27fd8fbbc762a742db3946a4b62e/.tmp/cf/ce845c92760343d7a3bf7b566b4c2b76 is 209, key is 143e4df0af440f74390bbb48607beb66a/cf:q/1733268618775/Put/seqid=0 2024-12-03T23:30:19,015 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203cbe6813865c34cd78ee8d3b8550111d0_b64cb6d11302c793c7b718a6ff0bad5d to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/d41d8cd98f00b204e9800998ecf8427e20241203cbe6813865c34cd78ee8d3b8550111d0_b64cb6d11302c793c7b718a6ff0bad5d 2024-12-03T23:30:19,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742253_1429 (size=14384) 2024-12-03T23:30:19,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742253_1429 (size=14384) 2024-12-03T23:30:19,016 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportExpiredSnapshot/b64cb6d11302c793c7b718a6ff0bad5d/.tmp/cf/7b835e2b9b044e4e8ff4a112ed4e6f09, store: [table=testtb-testExportExpiredSnapshot family=cf region=b64cb6d11302c793c7b718a6ff0bad5d] 2024-12-03T23:30:19,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742253_1429 (size=14384) 2024-12-03T23:30:19,016 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportExpiredSnapshot/b64cb6d11302c793c7b718a6ff0bad5d/.tmp/cf/7b835e2b9b044e4e8ff4a112ed4e6f09 is 209, key is 0642b0ff49a399db2d7ee81d7ad4069fb/cf:q/1733268618774/Put/seqid=0 2024-12-03T23:30:19,017 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=2.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportExpiredSnapshot/1fbf27fd8fbbc762a742db3946a4b62e/.tmp/cf/ce845c92760343d7a3bf7b566b4c2b76 2024-12-03T23:30:19,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742254_1430 (size=6531) 2024-12-03T23:30:19,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742254_1430 (size=6531) 2024-12-03T23:30:19,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742254_1430 (size=6531) 2024-12-03T23:30:19,021 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=400, hasBloomFilter=true, into tmp file hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportExpiredSnapshot/b64cb6d11302c793c7b718a6ff0bad5d/.tmp/cf/7b835e2b9b044e4e8ff4a112ed4e6f09 2024-12-03T23:30:19,022 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportExpiredSnapshot/1fbf27fd8fbbc762a742db3946a4b62e/.tmp/cf/ce845c92760343d7a3bf7b566b4c2b76 as hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportExpiredSnapshot/1fbf27fd8fbbc762a742db3946a4b62e/cf/ce845c92760343d7a3bf7b566b4c2b76 2024-12-03T23:30:19,025 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportExpiredSnapshot/1fbf27fd8fbbc762a742db3946a4b62e/cf/ce845c92760343d7a3bf7b566b4c2b76, entries=44, sequenceid=6, filesize=14.0 K 2024-12-03T23:30:19,026 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportExpiredSnapshot/b64cb6d11302c793c7b718a6ff0bad5d/.tmp/cf/7b835e2b9b044e4e8ff4a112ed4e6f09 as hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportExpiredSnapshot/b64cb6d11302c793c7b718a6ff0bad5d/cf/7b835e2b9b044e4e8ff4a112ed4e6f09 2024-12-03T23:30:19,026 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HRegion(3140): Finished flush of dataSize ~2.87 KB/2936, heapSize ~6.42 KB/6576, currentSize=0 B/0 for 1fbf27fd8fbbc762a742db3946a4b62e in 58ms, sequenceid=6, compaction requested=false 2024-12-03T23:30:19,026 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportExpiredSnapshot' 2024-12-03T23:30:19,027 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HRegion(2603): Flush status journal for 1fbf27fd8fbbc762a742db3946a4b62e: 2024-12-03T23:30:19,027 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,1,1733268617786.1fbf27fd8fbbc762a742db3946a4b62e. for snaptb0-testExportExpiredSnapshot completed. 2024-12-03T23:30:19,027 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,1,1733268617786.1fbf27fd8fbbc762a742db3946a4b62e.' region-info for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-03T23:30:19,027 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-03T23:30:19,027 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportExpiredSnapshot/1fbf27fd8fbbc762a742db3946a4b62e/cf/ce845c92760343d7a3bf7b566b4c2b76] hfiles 2024-12-03T23:30:19,027 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportExpiredSnapshot/1fbf27fd8fbbc762a742db3946a4b62e/cf/ce845c92760343d7a3bf7b566b4c2b76 for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-03T23:30:19,030 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportExpiredSnapshot/b64cb6d11302c793c7b718a6ff0bad5d/cf/7b835e2b9b044e4e8ff4a112ed4e6f09, entries=6, sequenceid=6, filesize=6.4 K 2024-12-03T23:30:19,031 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HRegion(3140): Finished flush of dataSize ~400 B/400, heapSize ~1.08 KB/1104, currentSize=0 B/0 for b64cb6d11302c793c7b718a6ff0bad5d in 63ms, 
sequenceid=6, compaction requested=false 2024-12-03T23:30:19,031 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HRegion(2603): Flush status journal for b64cb6d11302c793c7b718a6ff0bad5d: 2024-12-03T23:30:19,031 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,,1733268617786.b64cb6d11302c793c7b718a6ff0bad5d. for snaptb0-testExportExpiredSnapshot completed. 2024-12-03T23:30:19,031 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,,1733268617786.b64cb6d11302c793c7b718a6ff0bad5d.' region-info for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-03T23:30:19,031 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-03T23:30:19,031 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportExpiredSnapshot/b64cb6d11302c793c7b718a6ff0bad5d/cf/7b835e2b9b044e4e8ff4a112ed4e6f09] hfiles 2024-12-03T23:30:19,031 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportExpiredSnapshot/b64cb6d11302c793c7b718a6ff0bad5d/cf/7b835e2b9b044e4e8ff4a112ed4e6f09 for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-03T23:30:19,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742255_1431 (size=110) 2024-12-03T23:30:19,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742255_1431 (size=110) 2024-12-03T23:30:19,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742255_1431 (size=110) 2024-12-03T23:30:19,034 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,1,1733268617786.1fbf27fd8fbbc762a742db3946a4b62e. 
2024-12-03T23:30:19,034 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=179 2024-12-03T23:30:19,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.HMaster(4169): Remote procedure done, pid=179 2024-12-03T23:30:19,035 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportExpiredSnapshot on region 1fbf27fd8fbbc762a742db3946a4b62e 2024-12-03T23:30:19,035 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=179, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1fbf27fd8fbbc762a742db3946a4b62e 2024-12-03T23:30:19,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742256_1432 (size=110) 2024-12-03T23:30:19,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742256_1432 (size=110) 2024-12-03T23:30:19,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742256_1432 (size=110) 2024-12-03T23:30:19,039 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=179, ppid=177, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 1fbf27fd8fbbc762a742db3946a4b62e in 221 msec 2024-12-03T23:30:19,039 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,,1733268617786.b64cb6d11302c793c7b718a6ff0bad5d. 
2024-12-03T23:30:19,039 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=178 2024-12-03T23:30:19,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.HMaster(4169): Remote procedure done, pid=178 2024-12-03T23:30:19,039 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportExpiredSnapshot on region b64cb6d11302c793c7b718a6ff0bad5d 2024-12-03T23:30:19,039 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=178, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b64cb6d11302c793c7b718a6ff0bad5d 2024-12-03T23:30:19,041 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=178, resume processing ppid=177 2024-12-03T23:30:19,042 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-03T23:30:19,042 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=178, ppid=177, state=SUCCESS, hasLock=false; SnapshotRegionProcedure b64cb6d11302c793c7b718a6ff0bad5d in 224 msec 2024-12-03T23:30:19,042 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-03T23:30:19,043 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-03T23:30:19,043 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-03T23:30:19,043 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:30:19,044 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/c4ca4238a0b923820dcc509a6f75849b20241203cb38e45ec6884449b866d663b3746c30_1fbf27fd8fbbc762a742db3946a4b62e, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/d41d8cd98f00b204e9800998ecf8427e20241203cbe6813865c34cd78ee8d3b8550111d0_b64cb6d11302c793c7b718a6ff0bad5d] hfiles 2024-12-03T23:30:19,044 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/c4ca4238a0b923820dcc509a6f75849b20241203cb38e45ec6884449b866d663b3746c30_1fbf27fd8fbbc762a742db3946a4b62e 2024-12-03T23:30:19,044 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/d41d8cd98f00b204e9800998ecf8427e20241203cbe6813865c34cd78ee8d3b8550111d0_b64cb6d11302c793c7b718a6ff0bad5d 2024-12-03T23:30:19,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742257_1433 (size=294) 2024-12-03T23:30:19,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742257_1433 (size=294) 2024-12-03T23:30:19,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742257_1433 (size=294) 2024-12-03T23:30:19,050 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-03T23:30:19,050 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportExpiredSnapshot 2024-12-03T23:30:19,051 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/.tmp/snaptb0-testExportExpiredSnapshot 2024-12-03T23:30:19,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742258_1434 (size=963) 2024-12-03T23:30:19,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742258_1434 (size=963) 2024-12-03T23:30:19,063 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742258_1434 (size=963) 2024-12-03T23:30:19,068 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-03T23:30:19,074 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-03T23:30:19,075 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/.tmp/snaptb0-testExportExpiredSnapshot to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/snaptb0-testExportExpiredSnapshot 2024-12-03T23:30:19,076 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-03T23:30:19,076 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 177 2024-12-03T23:30:19,077 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=177, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } in 270 msec 2024-12-03T23:30:19,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=177 2024-12-03T23:30:19,120 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-12-03T23:30:19,121 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-03T23:30:19,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure2.ProcedureExecutor(1139): Stored pid=180, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testExportExpiredSnapshot 2024-12-03T23:30:19,122 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, 
hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_PRE_OPERATION 2024-12-03T23:30:19,122 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportExpiredSnapshot" procId is: 180 2024-12-03T23:30:19,123 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-03T23:30:19,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-12-03T23:30:19,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742259_1435 (size=436) 2024-12-03T23:30:19,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742259_1435 (size=436) 2024-12-03T23:30:19,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742259_1435 (size=436) 2024-12-03T23:30:19,130 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => c6477990e979586f139ab47f9dab2f20, NAME => 'testExportExpiredSnapshot,,1733268619120.c6477990e979586f139ab47f9dab2f20.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b 2024-12-03T23:30:19,131 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => b50632fd0085469e770e68c6467cbdf0, NAME => 'testExportExpiredSnapshot,1,1733268619120.b50632fd0085469e770e68c6467cbdf0.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b 2024-12-03T23:30:19,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742260_1436 (size=61) 2024-12-03T23:30:19,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742260_1436 (size=61) 2024-12-03T23:30:19,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:37061 is added to blk_1073742260_1436 (size=61) 2024-12-03T23:30:19,145 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,,1733268619120.c6477990e979586f139ab47f9dab2f20.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T23:30:19,146 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1722): Closing c6477990e979586f139ab47f9dab2f20, disabling compactions & flushes 2024-12-03T23:30:19,146 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,,1733268619120.c6477990e979586f139ab47f9dab2f20. 2024-12-03T23:30:19,146 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,,1733268619120.c6477990e979586f139ab47f9dab2f20. 2024-12-03T23:30:19,146 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,,1733268619120.c6477990e979586f139ab47f9dab2f20. after waiting 0 ms 2024-12-03T23:30:19,146 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,,1733268619120.c6477990e979586f139ab47f9dab2f20. 2024-12-03T23:30:19,146 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,,1733268619120.c6477990e979586f139ab47f9dab2f20. 2024-12-03T23:30:19,146 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1676): Region close journal for c6477990e979586f139ab47f9dab2f20: Waiting for close lock at 1733268619146Disabling compacts and flushes for region at 1733268619146Disabling writes for close at 1733268619146Writing region close event to WAL at 1733268619146Closed at 1733268619146 2024-12-03T23:30:19,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742261_1437 (size=61) 2024-12-03T23:30:19,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742261_1437 (size=61) 2024-12-03T23:30:19,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742261_1437 (size=61) 2024-12-03T23:30:19,147 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,1,1733268619120.b50632fd0085469e770e68c6467cbdf0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T23:30:19,147 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1722): Closing b50632fd0085469e770e68c6467cbdf0, disabling compactions & flushes 2024-12-03T23:30:19,147 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,1,1733268619120.b50632fd0085469e770e68c6467cbdf0. 2024-12-03T23:30:19,147 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,1,1733268619120.b50632fd0085469e770e68c6467cbdf0. 
2024-12-03T23:30:19,147 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,1,1733268619120.b50632fd0085469e770e68c6467cbdf0. after waiting 0 ms 2024-12-03T23:30:19,147 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,1,1733268619120.b50632fd0085469e770e68c6467cbdf0. 2024-12-03T23:30:19,147 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,1,1733268619120.b50632fd0085469e770e68c6467cbdf0. 2024-12-03T23:30:19,147 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1676): Region close journal for b50632fd0085469e770e68c6467cbdf0: Waiting for close lock at 1733268619147Disabling compacts and flushes for region at 1733268619147Disabling writes for close at 1733268619147Writing region close event to WAL at 1733268619147Closed at 1733268619147 2024-12-03T23:30:19,149 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_ADD_TO_META 2024-12-03T23:30:19,149 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportExpiredSnapshot,,1733268619120.c6477990e979586f139ab47f9dab2f20.","families":{"info":[{"qualifier":"regioninfo","vlen":60,"tag":[],"timestamp":"1733268619149"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733268619149"}]},"ts":"1733268619149"} 2024-12-03T23:30:19,149 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportExpiredSnapshot,1,1733268619120.b50632fd0085469e770e68c6467cbdf0.","families":{"info":[{"qualifier":"regioninfo","vlen":60,"tag":[],"timestamp":"1733268619149"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733268619149"}]},"ts":"1733268619149"} 2024-12-03T23:30:19,151 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
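The table descriptor logged for testExportExpiredSnapshot (IS_MOB => 'true', MOB_THRESHOLD => '0', a single 'cf' family, and two regions split at key '1') could be built roughly as follows with the standard descriptor-builder API. This is a hedged sketch, not the harness's own setup code; the class name and configuration bootstrap are assumptions, while the table name, family name, MOB settings and split key mirror the log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTableSketch {  // hypothetical class, for illustration only
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // MOB_THRESHOLD=0 sends every cell of 'cf' down the MOB write path
      // (cf. the DefaultMobStoreFlusher flushes earlier in this log).
      ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes("cf"))
          .setMobEnabled(true)
          .setMobThreshold(0L)
          .setMaxVersions(1)
          .build();
      TableDescriptor table = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testExportExpiredSnapshot"))
          .setColumnFamily(cf)
          .build();
      // One split key ('1') yields the two regions added to hbase:meta above.
      byte[][] splitKeys = new byte[][] { Bytes.toBytes("1") };
      admin.createTable(table, splitKeys);
    }
  }
}
```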
2024-12-03T23:30:19,152 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-03T23:30:19,152 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733268619152"}]},"ts":"1733268619152"} 2024-12-03T23:30:19,153 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportExpiredSnapshot, state=ENABLING in hbase:meta 2024-12-03T23:30:19,153 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {ed8014902436=0} racks are {/default-rack=0} 2024-12-03T23:30:19,155 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-03T23:30:19,155 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-03T23:30:19,155 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-03T23:30:19,155 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-03T23:30:19,155 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-03T23:30:19,155 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-03T23:30:19,155 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-03T23:30:19,155 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-03T23:30:19,155 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-03T23:30:19,155 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-03T23:30:19,155 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=c6477990e979586f139ab47f9dab2f20, ASSIGN}, {pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=b50632fd0085469e770e68c6467cbdf0, ASSIGN}] 2024-12-03T23:30:19,156 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=b50632fd0085469e770e68c6467cbdf0, ASSIGN 2024-12-03T23:30:19,156 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=c6477990e979586f139ab47f9dab2f20, ASSIGN 2024-12-03T23:30:19,157 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=b50632fd0085469e770e68c6467cbdf0, ASSIGN; state=OFFLINE, location=ed8014902436,41121,1733268370525; forceNewPlan=false, retain=false 2024-12-03T23:30:19,157 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, 
hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=c6477990e979586f139ab47f9dab2f20, ASSIGN; state=OFFLINE, location=ed8014902436,37351,1733268370600; forceNewPlan=false, retain=false 2024-12-03T23:30:19,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-12-03T23:30:19,307 INFO [ed8014902436:43753 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-03T23:30:19,308 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=181 updating hbase:meta row=c6477990e979586f139ab47f9dab2f20, regionState=OPENING, regionLocation=ed8014902436,37351,1733268370600 2024-12-03T23:30:19,308 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=182 updating hbase:meta row=b50632fd0085469e770e68c6467cbdf0, regionState=OPENING, regionLocation=ed8014902436,41121,1733268370525 2024-12-03T23:30:19,309 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=b50632fd0085469e770e68c6467cbdf0, ASSIGN because future has completed 2024-12-03T23:30:19,309 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=183, ppid=182, state=RUNNABLE, hasLock=false; OpenRegionProcedure b50632fd0085469e770e68c6467cbdf0, server=ed8014902436,41121,1733268370525}] 2024-12-03T23:30:19,310 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=c6477990e979586f139ab47f9dab2f20, ASSIGN because future has completed 2024-12-03T23:30:19,310 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=184, ppid=181, state=RUNNABLE, hasLock=false; OpenRegionProcedure c6477990e979586f139ab47f9dab2f20, server=ed8014902436,37351,1733268370600}] 2024-12-03T23:30:19,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-12-03T23:30:19,463 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] handler.AssignRegionHandler(132): Open testExportExpiredSnapshot,,1733268619120.c6477990e979586f139ab47f9dab2f20. 2024-12-03T23:30:19,463 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] handler.AssignRegionHandler(132): Open testExportExpiredSnapshot,1,1733268619120.b50632fd0085469e770e68c6467cbdf0. 
2024-12-03T23:30:19,463 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7752): Opening region: {ENCODED => c6477990e979586f139ab47f9dab2f20, NAME => 'testExportExpiredSnapshot,,1733268619120.c6477990e979586f139ab47f9dab2f20.', STARTKEY => '', ENDKEY => '1'} 2024-12-03T23:30:19,463 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7752): Opening region: {ENCODED => b50632fd0085469e770e68c6467cbdf0, NAME => 'testExportExpiredSnapshot,1,1733268619120.b50632fd0085469e770e68c6467cbdf0.', STARTKEY => '1', ENDKEY => ''} 2024-12-03T23:30:19,464 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportExpiredSnapshot,,1733268619120.c6477990e979586f139ab47f9dab2f20. service=AccessControlService 2024-12-03T23:30:19,464 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportExpiredSnapshot,1,1733268619120.b50632fd0085469e770e68c6467cbdf0. service=AccessControlService 2024-12-03T23:30:19,464 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-03T23:30:19,464 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-03T23:30:19,464 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot c6477990e979586f139ab47f9dab2f20 2024-12-03T23:30:19,464 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot b50632fd0085469e770e68c6467cbdf0 2024-12-03T23:30:19,464 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,,1733268619120.c6477990e979586f139ab47f9dab2f20.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T23:30:19,464 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,1,1733268619120.b50632fd0085469e770e68c6467cbdf0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T23:30:19,464 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7794): checking encryption for b50632fd0085469e770e68c6467cbdf0 2024-12-03T23:30:19,464 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7794): checking encryption for c6477990e979586f139ab47f9dab2f20 2024-12-03T23:30:19,464 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7797): checking classloading for 
c6477990e979586f139ab47f9dab2f20 2024-12-03T23:30:19,464 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7797): checking classloading for b50632fd0085469e770e68c6467cbdf0 2024-12-03T23:30:19,465 INFO [StoreOpener-b50632fd0085469e770e68c6467cbdf0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region b50632fd0085469e770e68c6467cbdf0 2024-12-03T23:30:19,465 INFO [StoreOpener-c6477990e979586f139ab47f9dab2f20-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region c6477990e979586f139ab47f9dab2f20 2024-12-03T23:30:19,466 INFO [StoreOpener-c6477990e979586f139ab47f9dab2f20-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c6477990e979586f139ab47f9dab2f20 columnFamilyName cf 2024-12-03T23:30:19,466 INFO [StoreOpener-b50632fd0085469e770e68c6467cbdf0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b50632fd0085469e770e68c6467cbdf0 columnFamilyName cf 2024-12-03T23:30:19,467 DEBUG [StoreOpener-c6477990e979586f139ab47f9dab2f20-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:30:19,467 DEBUG [StoreOpener-b50632fd0085469e770e68c6467cbdf0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:30:19,468 INFO [StoreOpener-c6477990e979586f139ab47f9dab2f20-1 {}] regionserver.HStore(327): Store=c6477990e979586f139ab47f9dab2f20/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T23:30:19,468 INFO [StoreOpener-b50632fd0085469e770e68c6467cbdf0-1 {}] regionserver.HStore(327): Store=b50632fd0085469e770e68c6467cbdf0/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T23:30:19,468 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1038): replaying wal for b50632fd0085469e770e68c6467cbdf0 2024-12-03T23:30:19,468 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1038): replaying wal for c6477990e979586f139ab47f9dab2f20 2024-12-03T23:30:19,468 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportExpiredSnapshot/c6477990e979586f139ab47f9dab2f20 2024-12-03T23:30:19,468 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportExpiredSnapshot/b50632fd0085469e770e68c6467cbdf0 2024-12-03T23:30:19,469 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportExpiredSnapshot/c6477990e979586f139ab47f9dab2f20 2024-12-03T23:30:19,469 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportExpiredSnapshot/b50632fd0085469e770e68c6467cbdf0 2024-12-03T23:30:19,469 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1048): stopping wal replay for b50632fd0085469e770e68c6467cbdf0 2024-12-03T23:30:19,469 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1060): Cleaning up temporary data for b50632fd0085469e770e68c6467cbdf0 2024-12-03T23:30:19,470 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1093): writing seq id for b50632fd0085469e770e68c6467cbdf0 2024-12-03T23:30:19,471 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1048): stopping wal replay for c6477990e979586f139ab47f9dab2f20 2024-12-03T23:30:19,471 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1060): Cleaning up temporary data for c6477990e979586f139ab47f9dab2f20 2024-12-03T23:30:19,472 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportExpiredSnapshot/b50632fd0085469e770e68c6467cbdf0/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T23:30:19,473 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1114): Opened b50632fd0085469e770e68c6467cbdf0; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65163664, 
jitterRate=-0.02898573875427246}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-03T23:30:19,473 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1122): Running coprocessor post-open hooks for b50632fd0085469e770e68c6467cbdf0 2024-12-03T23:30:19,473 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1006): Region open journal for b50632fd0085469e770e68c6467cbdf0: Running coprocessor pre-open hook at 1733268619464Writing region info on filesystem at 1733268619464Initializing all the Stores at 1733268619465 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733268619465Cleaning up temporary data from old regions at 1733268619469 (+4 ms)Running coprocessor post-open hooks at 1733268619473 (+4 ms)Region opened successfully at 1733268619473 2024-12-03T23:30:19,474 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportExpiredSnapshot,1,1733268619120.b50632fd0085469e770e68c6467cbdf0., pid=183, masterSystemTime=1733268619461 2024-12-03T23:30:19,475 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1093): writing seq id for c6477990e979586f139ab47f9dab2f20 2024-12-03T23:30:19,476 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportExpiredSnapshot,1,1733268619120.b50632fd0085469e770e68c6467cbdf0. 2024-12-03T23:30:19,476 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] handler.AssignRegionHandler(153): Opened testExportExpiredSnapshot,1,1733268619120.b50632fd0085469e770e68c6467cbdf0. 
2024-12-03T23:30:19,477 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=182 updating hbase:meta row=b50632fd0085469e770e68c6467cbdf0, regionState=OPEN, openSeqNum=2, regionLocation=ed8014902436,41121,1733268370525 2024-12-03T23:30:19,477 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportExpiredSnapshot/c6477990e979586f139ab47f9dab2f20/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T23:30:19,477 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1114): Opened c6477990e979586f139ab47f9dab2f20; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70196338, jitterRate=0.0460069477558136}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-03T23:30:19,477 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1122): Running coprocessor post-open hooks for c6477990e979586f139ab47f9dab2f20 2024-12-03T23:30:19,478 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1006): Region open journal for c6477990e979586f139ab47f9dab2f20: Running coprocessor pre-open hook at 1733268619464Writing region info on filesystem at 1733268619464Initializing all the Stores at 1733268619465 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733268619465Cleaning up temporary data from old regions at 1733268619471 (+6 ms)Running coprocessor post-open hooks at 1733268619477 (+6 ms)Region opened successfully at 1733268619478 (+1 ms) 2024-12-03T23:30:19,478 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportExpiredSnapshot,,1733268619120.c6477990e979586f139ab47f9dab2f20., pid=184, masterSystemTime=1733268619461 2024-12-03T23:30:19,479 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=183, ppid=182, state=RUNNABLE, hasLock=false; OpenRegionProcedure b50632fd0085469e770e68c6467cbdf0, server=ed8014902436,41121,1733268370525 because future has completed 2024-12-03T23:30:19,479 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportExpiredSnapshot,,1733268619120.c6477990e979586f139ab47f9dab2f20. 2024-12-03T23:30:19,480 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] handler.AssignRegionHandler(153): Opened testExportExpiredSnapshot,,1733268619120.c6477990e979586f139ab47f9dab2f20. 
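At this point both regions of testExportExpiredSnapshot report as opened on their assigned region servers. A client could observe the same assignment through a RegionLocator; this is an illustrative sketch under the same assumptions as above (hypothetical class name, default configuration lookup), with the expected encoded region names taken from this run's log.

```java
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class RegionLocationSketch {  // hypothetical class, for illustration only
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName tn = TableName.valueOf("testExportExpiredSnapshot");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(tn)) {
      // Expect two locations, matching the two OpenRegionProcedures above
      // (encoded names c6477990... and b50632fd... in this particular run).
      List<HRegionLocation> locations = locator.getAllRegionLocations();
      for (HRegionLocation loc : locations) {
        System.out.println(loc.getRegion().getEncodedName()
            + " -> " + loc.getServerName());
      }
    }
  }
}
```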
2024-12-03T23:30:19,481 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=181 updating hbase:meta row=c6477990e979586f139ab47f9dab2f20, regionState=OPEN, openSeqNum=2, regionLocation=ed8014902436,37351,1733268370600 2024-12-03T23:30:19,482 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=183, resume processing ppid=182 2024-12-03T23:30:19,482 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=183, ppid=182, state=SUCCESS, hasLock=false; OpenRegionProcedure b50632fd0085469e770e68c6467cbdf0, server=ed8014902436,41121,1733268370525 in 170 msec 2024-12-03T23:30:19,482 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=184, ppid=181, state=RUNNABLE, hasLock=false; OpenRegionProcedure c6477990e979586f139ab47f9dab2f20, server=ed8014902436,37351,1733268370600 because future has completed 2024-12-03T23:30:19,483 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=182, ppid=180, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=b50632fd0085469e770e68c6467cbdf0, ASSIGN in 327 msec 2024-12-03T23:30:19,485 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=184, resume processing ppid=181 2024-12-03T23:30:19,485 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=184, ppid=181, state=SUCCESS, hasLock=false; OpenRegionProcedure c6477990e979586f139ab47f9dab2f20, server=ed8014902436,37351,1733268370600 in 173 msec 2024-12-03T23:30:19,487 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=181, resume processing ppid=180 2024-12-03T23:30:19,487 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=181, ppid=180, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=c6477990e979586f139ab47f9dab2f20, ASSIGN in 330 msec 2024-12-03T23:30:19,488 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-03T23:30:19,488 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733268619488"}]},"ts":"1733268619488"} 2024-12-03T23:30:19,489 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportExpiredSnapshot, state=ENABLED in hbase:meta 2024-12-03T23:30:19,490 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_POST_OPERATION 2024-12-03T23:30:19,490 DEBUG [PEWorker-3 {}] access.PermissionStorage(177): Writing permission with rowKey testExportExpiredSnapshot jenkins: RWXCA 2024-12-03T23:30:19,493 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37351 {}] access.PermissionStorage(613): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-03T23:30:19,545 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:30:19,545 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:30:19,545 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:30:19,545 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:30:19,580 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-03T23:30:19,580 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-03T23:30:19,580 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-03T23:30:19,580 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-03T23:30:19,580 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-03T23:30:19,580 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-03T23:30:19,580 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-03T23:30:19,580 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-03T23:30:19,581 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=180, state=SUCCESS, hasLock=false; CreateTableProcedure table=testExportExpiredSnapshot in 458 msec 2024-12-03T23:30:19,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-12-03T23:30:19,750 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testExportExpiredSnapshot completed 
2024-12-03T23:30:19,750 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-03T23:30:19,753 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportExpiredSnapshot 2024-12-03T23:30:19,753 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportExpiredSnapshot,,1733268619120.c6477990e979586f139ab47f9dab2f20. 2024-12-03T23:30:19,753 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-03T23:30:19,755 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-03T23:30:19,762 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-03T23:30:19,766 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-03T23:30:19,774 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37351 {}] regionserver.HRegion(8528): writing data to region testExportExpiredSnapshot,,1733268619120.c6477990e979586f139ab47f9dab2f20. with WAL disabled. Data may be lost in the event of a crash. 2024-12-03T23:30:19,776 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41121 {}] regionserver.HRegion(8528): writing data to region testExportExpiredSnapshot,1,1733268619120.b50632fd0085469e770e68c6467cbdf0. with WAL disabled. Data may be lost in the event of a crash. 2024-12-03T23:30:19,778 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-03T23:30:19,789 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportExpiredSnapshot 2024-12-03T23:30:19,789 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportExpiredSnapshot,,1733268619120.c6477990e979586f139ab47f9dab2f20. 
2024-12-03T23:30:19,789 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-03T23:30:19,791 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-03T23:30:19,795 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-03T23:30:19,800 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } 2024-12-03T23:30:19,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snapshot-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-12-03T23:30:19,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-03T23:30:19,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2806d96f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:30:19,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.ClusterIdFetcher(90): Going to request ed8014902436,43753,-1 for getting cluster id 2024-12-03T23:30:19,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T23:30:19,802 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '365bf15c-9cbe-4838-a767-ac7274dcdbe4' 2024-12-03T23:30:19,802 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T23:30:19,802 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "365bf15c-9cbe-4838-a767-ac7274dcdbe4" 2024-12-03T23:30:19,802 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@396e2f89, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:30:19,802 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [ed8014902436,43753,-1] 2024-12-03T23:30:19,802 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T23:30:19,802 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:30:19,803 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46120, version=4.0.0-alpha-1-SNAPSHOT, 
sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T23:30:19,803 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@460dfc91, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:30:19,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T23:30:19,804 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ed8014902436,37351,1733268370600, seqNum=-1] 2024-12-03T23:30:19,804 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T23:30:19,805 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39368, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T23:30:19,806 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753. 2024-12-03T23:30:19,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-03T23:30:19,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:30:19,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:30:19,806 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-03T23:30:19,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@234403aa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:30:19,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.ClusterIdFetcher(90): Going to request ed8014902436,43753,-1 for getting cluster id 2024-12-03T23:30:19,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T23:30:19,807 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '365bf15c-9cbe-4838-a767-ac7274dcdbe4' 2024-12-03T23:30:19,807 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T23:30:19,807 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "365bf15c-9cbe-4838-a767-ac7274dcdbe4" 2024-12-03T23:30:19,807 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@8d4c45, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:30:19,807 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [ed8014902436,43753,-1] 2024-12-03T23:30:19,807 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T23:30:19,808 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:30:19,808 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46142, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T23:30:19,808 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5cfa6c64, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:30:19,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T23:30:19,809 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ed8014902436,37351,1733268370600, seqNum=-1] 2024-12-03T23:30:19,809 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T23:30:19,810 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39378, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-03T23:30:19,812 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1733268373530.94ece278d222af9bae70064026dc24ae., hostname=ed8014902436,37351,1733268370600, seqNum=2] 2024-12-03T23:30:19,813 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753. 2024-12-03T23:30:19,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor256.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-03T23:30:19,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:30:19,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:30:19,813 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-03T23:30:19,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] access.PermissionStorage(613): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-03T23:30:19,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-03T23:30:19,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure2.ProcedureExecutor(1139): Stored pid=185, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } 2024-12-03T23:30:19,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, snapshot procedure id = 185 2024-12-03T23:30:19,815 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_PREPARE 2024-12-03T23:30:19,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=185 2024-12-03T23:30:19,816 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-03T23:30:19,818 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-03T23:30:19,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742262_1438 (size=152) 2024-12-03T23:30:19,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742262_1438 (size=152) 2024-12-03T23:30:19,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742262_1438 (size=152) 2024-12-03T23:30:19,828 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-03T23:30:19,828 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=186, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c6477990e979586f139ab47f9dab2f20}, {pid=187, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b50632fd0085469e770e68c6467cbdf0}] 2024-12-03T23:30:19,828 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=186, ppid=185, state=RUNNABLE, 
hasLock=false; SnapshotRegionProcedure c6477990e979586f139ab47f9dab2f20 2024-12-03T23:30:19,829 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=187, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b50632fd0085469e770e68c6467cbdf0 2024-12-03T23:30:19,917 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot 2024-12-03T23:30:19,917 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot Metrics about Tables on a single HBase RegionServer 2024-12-03T23:30:19,918 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot 2024-12-03T23:30:19,918 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot Metrics about Tables on a single HBase RegionServer 2024-12-03T23:30:19,918 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-03T23:30:19,919 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion 2024-12-03T23:30:19,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=185 2024-12-03T23:30:19,980 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41121 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=187 2024-12-03T23:30:19,980 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37351 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=186 2024-12-03T23:30:19,980 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportExpiredSnapshot,,1733268619120.c6477990e979586f139ab47f9dab2f20. 2024-12-03T23:30:19,980 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportExpiredSnapshot,1,1733268619120.b50632fd0085469e770e68c6467cbdf0. 
2024-12-03T23:30:19,980 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegion(2902): Flushing c6477990e979586f139ab47f9dab2f20 1/1 column families, dataSize=199 B heapSize=688 B 2024-12-03T23:30:19,980 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegion(2902): Flushing b50632fd0085469e770e68c6467cbdf0 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-12-03T23:30:19,997 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203e69f8ff52fc245cfa5b9b759f3debf66_c6477990e979586f139ab47f9dab2f20 is 71, key is 0199229c26e44314c53a4b102c36102f/cf:q/1733268619773/Put/seqid=0 2024-12-03T23:30:20,005 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b2024120358302a804fc54198b454eb5fa946ce38_b50632fd0085469e770e68c6467cbdf0 is 71, key is 16a19386fc2b32baa742e8ce503674fe/cf:q/1733268619776/Put/seqid=0 2024-12-03T23:30:20,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742263_1439 (size=5102) 2024-12-03T23:30:20,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742263_1439 (size=5102) 2024-12-03T23:30:20,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742263_1439 (size=5102) 2024-12-03T23:30:20,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742264_1440 (size=8171) 2024-12-03T23:30:20,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742264_1440 (size=8171) 2024-12-03T23:30:20,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742264_1440 (size=8171) 2024-12-03T23:30:20,011 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:30:20,015 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b2024120358302a804fc54198b454eb5fa946ce38_b50632fd0085469e770e68c6467cbdf0 to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/c4ca4238a0b923820dcc509a6f75849b2024120358302a804fc54198b454eb5fa946ce38_b50632fd0085469e770e68c6467cbdf0 2024-12-03T23:30:20,016 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportExpiredSnapshot/b50632fd0085469e770e68c6467cbdf0/.tmp/cf/ffb186585aeb468a9f180033eae0f1cc, store: [table=testExportExpiredSnapshot family=cf region=b50632fd0085469e770e68c6467cbdf0] 2024-12-03T23:30:20,016 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportExpiredSnapshot/b50632fd0085469e770e68c6467cbdf0/.tmp/cf/ffb186585aeb468a9f180033eae0f1cc is 202, key is 1b7c0dff48f2cfc421307b9f27412903b/cf:q/1733268619776/Put/seqid=0 2024-12-03T23:30:20,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742265_1441 (size=14661) 2024-12-03T23:30:20,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742265_1441 (size=14661) 2024-12-03T23:30:20,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742265_1441 (size=14661) 2024-12-03T23:30:20,021 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=5, memsize=3.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportExpiredSnapshot/b50632fd0085469e770e68c6467cbdf0/.tmp/cf/ffb186585aeb468a9f180033eae0f1cc 2024-12-03T23:30:20,026 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportExpiredSnapshot/b50632fd0085469e770e68c6467cbdf0/.tmp/cf/ffb186585aeb468a9f180033eae0f1cc as hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportExpiredSnapshot/b50632fd0085469e770e68c6467cbdf0/cf/ffb186585aeb468a9f180033eae0f1cc 2024-12-03T23:30:20,031 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportExpiredSnapshot/b50632fd0085469e770e68c6467cbdf0/cf/ffb186585aeb468a9f180033eae0f1cc, entries=47, sequenceid=5, filesize=14.3 K 2024-12-03T23:30:20,032 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegion(3140): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for b50632fd0085469e770e68c6467cbdf0 in 52ms, sequenceid=5, compaction requested=false 2024-12-03T23:30:20,033 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportExpiredSnapshot' 2024-12-03T23:30:20,033 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegion(2603): Flush status journal for b50632fd0085469e770e68c6467cbdf0: 2024-12-03T23:30:20,033 
DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportExpiredSnapshot,1,1733268619120.b50632fd0085469e770e68c6467cbdf0. for snapshot-testExportExpiredSnapshot completed. 2024-12-03T23:30:20,033 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(241): Storing 'testExportExpiredSnapshot,1,1733268619120.b50632fd0085469e770e68c6467cbdf0.' region-info for snapshot=snapshot-testExportExpiredSnapshot 2024-12-03T23:30:20,033 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-03T23:30:20,033 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportExpiredSnapshot/b50632fd0085469e770e68c6467cbdf0/cf/ffb186585aeb468a9f180033eae0f1cc] hfiles 2024-12-03T23:30:20,033 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportExpiredSnapshot/b50632fd0085469e770e68c6467cbdf0/cf/ffb186585aeb468a9f180033eae0f1cc for snapshot=snapshot-testExportExpiredSnapshot 2024-12-03T23:30:20,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742266_1442 (size=103) 2024-12-03T23:30:20,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742266_1442 (size=103) 2024-12-03T23:30:20,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742266_1442 (size=103) 2024-12-03T23:30:20,040 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportExpiredSnapshot,1,1733268619120.b50632fd0085469e770e68c6467cbdf0. 
2024-12-03T23:30:20,040 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=187 2024-12-03T23:30:20,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.HMaster(4169): Remote procedure done, pid=187 2024-12-03T23:30:20,040 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportExpiredSnapshot on region b50632fd0085469e770e68c6467cbdf0 2024-12-03T23:30:20,041 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=187, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b50632fd0085469e770e68c6467cbdf0 2024-12-03T23:30:20,043 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=187, ppid=185, state=SUCCESS, hasLock=false; SnapshotRegionProcedure b50632fd0085469e770e68c6467cbdf0 in 214 msec 2024-12-03T23:30:20,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=185 2024-12-03T23:30:20,407 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:30:20,411 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203e69f8ff52fc245cfa5b9b759f3debf66_c6477990e979586f139ab47f9dab2f20 to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/d41d8cd98f00b204e9800998ecf8427e20241203e69f8ff52fc245cfa5b9b759f3debf66_c6477990e979586f139ab47f9dab2f20 2024-12-03T23:30:20,412 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportExpiredSnapshot/c6477990e979586f139ab47f9dab2f20/.tmp/cf/62bb14ebb8804919bf2920129662c3f8, store: [table=testExportExpiredSnapshot family=cf region=c6477990e979586f139ab47f9dab2f20] 2024-12-03T23:30:20,413 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportExpiredSnapshot/c6477990e979586f139ab47f9dab2f20/.tmp/cf/62bb14ebb8804919bf2920129662c3f8 is 202, key is 0d66b655039d73ae66be027a3656c0059/cf:q/1733268619773/Put/seqid=0 2024-12-03T23:30:20,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742267_1443 (size=5890) 2024-12-03T23:30:20,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742267_1443 (size=5890) 2024-12-03T23:30:20,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to 
blk_1073742267_1443 (size=5890) 2024-12-03T23:30:20,418 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=5, memsize=199, hasBloomFilter=true, into tmp file hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportExpiredSnapshot/c6477990e979586f139ab47f9dab2f20/.tmp/cf/62bb14ebb8804919bf2920129662c3f8 2024-12-03T23:30:20,422 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportExpiredSnapshot/c6477990e979586f139ab47f9dab2f20/.tmp/cf/62bb14ebb8804919bf2920129662c3f8 as hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportExpiredSnapshot/c6477990e979586f139ab47f9dab2f20/cf/62bb14ebb8804919bf2920129662c3f8 2024-12-03T23:30:20,427 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportExpiredSnapshot/c6477990e979586f139ab47f9dab2f20/cf/62bb14ebb8804919bf2920129662c3f8, entries=3, sequenceid=5, filesize=5.8 K 2024-12-03T23:30:20,428 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegion(3140): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for c6477990e979586f139ab47f9dab2f20 in 448ms, sequenceid=5, compaction requested=false 2024-12-03T23:30:20,428 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegion(2603): Flush status journal for c6477990e979586f139ab47f9dab2f20: 2024-12-03T23:30:20,428 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportExpiredSnapshot,,1733268619120.c6477990e979586f139ab47f9dab2f20. for snapshot-testExportExpiredSnapshot completed. 2024-12-03T23:30:20,428 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(241): Storing 'testExportExpiredSnapshot,,1733268619120.c6477990e979586f139ab47f9dab2f20.' 
region-info for snapshot=snapshot-testExportExpiredSnapshot 2024-12-03T23:30:20,428 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-03T23:30:20,428 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportExpiredSnapshot/c6477990e979586f139ab47f9dab2f20/cf/62bb14ebb8804919bf2920129662c3f8] hfiles 2024-12-03T23:30:20,428 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportExpiredSnapshot/c6477990e979586f139ab47f9dab2f20/cf/62bb14ebb8804919bf2920129662c3f8 for snapshot=snapshot-testExportExpiredSnapshot 2024-12-03T23:30:20,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=185 2024-12-03T23:30:20,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742268_1444 (size=103) 2024-12-03T23:30:20,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742268_1444 (size=103) 2024-12-03T23:30:20,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742268_1444 (size=103) 2024-12-03T23:30:20,448 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportExpiredSnapshot,,1733268619120.c6477990e979586f139ab47f9dab2f20. 
2024-12-03T23:30:20,448 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=186 2024-12-03T23:30:20,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.HMaster(4169): Remote procedure done, pid=186 2024-12-03T23:30:20,449 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportExpiredSnapshot on region c6477990e979586f139ab47f9dab2f20 2024-12-03T23:30:20,449 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=186, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c6477990e979586f139ab47f9dab2f20 2024-12-03T23:30:20,451 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=186, resume processing ppid=185 2024-12-03T23:30:20,451 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-03T23:30:20,451 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=186, ppid=185, state=SUCCESS, hasLock=false; SnapshotRegionProcedure c6477990e979586f139ab47f9dab2f20 in 621 msec 2024-12-03T23:30:20,451 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-03T23:30:20,452 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-03T23:30:20,452 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-03T23:30:20,452 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:30:20,453 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/c4ca4238a0b923820dcc509a6f75849b2024120358302a804fc54198b454eb5fa946ce38_b50632fd0085469e770e68c6467cbdf0, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/d41d8cd98f00b204e9800998ecf8427e20241203e69f8ff52fc245cfa5b9b759f3debf66_c6477990e979586f139ab47f9dab2f20] hfiles 2024-12-03T23:30:20,453 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/c4ca4238a0b923820dcc509a6f75849b2024120358302a804fc54198b454eb5fa946ce38_b50632fd0085469e770e68c6467cbdf0 2024-12-03T23:30:20,453 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/d41d8cd98f00b204e9800998ecf8427e20241203e69f8ff52fc245cfa5b9b759f3debf66_c6477990e979586f139ab47f9dab2f20 2024-12-03T23:30:20,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742269_1445 (size=287) 2024-12-03T23:30:20,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742269_1445 (size=287) 2024-12-03T23:30:20,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742269_1445 (size=287) 2024-12-03T23:30:20,469 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-03T23:30:20,469 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snapshot-testExportExpiredSnapshot 2024-12-03T23:30:20,470 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot 2024-12-03T23:30:20,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742270_1446 (size=935) 2024-12-03T23:30:20,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742270_1446 (size=935) 2024-12-03T23:30:20,482 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742270_1446 (size=935) 2024-12-03T23:30:20,484 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-03T23:30:20,489 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-03T23:30:20,489 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/snapshot-testExportExpiredSnapshot 2024-12-03T23:30:20,490 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_POST_OPERATION 2024-12-03T23:30:20,491 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, snapshot procedure id = 185 2024-12-03T23:30:20,492 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=185, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } in 677 msec 2024-12-03T23:30:20,708 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0007_000001 (auth:SIMPLE) from 127.0.0.1:60316 2024-12-03T23:30:20,723 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_0/usercache/jenkins/appcache/application_1733268377251_0007/container_1733268377251_0007_01_000001/launch_container.sh] 2024-12-03T23:30:20,723 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_0/usercache/jenkins/appcache/application_1733268377251_0007/container_1733268377251_0007_01_000001/container_tokens] 2024-12-03T23:30:20,723 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_0/usercache/jenkins/appcache/application_1733268377251_0007/container_1733268377251_0007_01_000001/sysfs] 2024-12-03T23:30:20,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=185 2024-12-03T23:30:20,949 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testExportExpiredSnapshot completed 2024-12-03T23:30:22,345 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-03T23:30:30,956 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268630956 2024-12-03T23:30:30,956 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:37331, tgtDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268630956, rawTgtDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268630956, srcFsUri=hdfs://localhost:37331, srcDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b 2024-12-03T23:30:30,981 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:37331, inputRoot=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b 2024-12-03T23:30:30,981 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-919726820_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268630956, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268630956/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot 2024-12-03T23:30:30,983 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-03T23:30:30,984 ERROR [Time-limited test {}] util.AbstractHBaseTool(152): Error running command-line tool org.apache.hadoop.hbase.snapshot.SnapshotTTLExpiredException: TTL for snapshot 'snapshot-testExportExpiredSnapshot' has already expired. at org.apache.hadoop.hbase.snapshot.ExportSnapshot.verifySnapshot(ExportSnapshot.java:960) ~[classes/:?] at org.apache.hadoop.hbase.snapshot.ExportSnapshot.doWork(ExportSnapshot.java:1105) ~[classes/:?] at org.apache.hadoop.hbase.util.AbstractHBaseTool.run(AbstractHBaseTool.java:150) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:82) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.runExportSnapshot(TestExportSnapshot.java:570) ~[test-classes/:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportExpiredSnapshot(TestExportSnapshot.java:362) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
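The failed export above is driven through Hadoop's ToolRunner (visible in the stack trace), and ExportSnapshot aborts before copying anything because the snapshot's TTL has already elapsed. A minimal sketch of launching the same export programmatically, assuming the documented -snapshot and -copy-to options; the destination URI here is a placeholder, not the path used by the test:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Snapshot name taken from the log; destination URI is a placeholder.
    // ExportSnapshot verifies the snapshot first and fails with
    // SnapshotTTLExpiredException if the TTL has already elapsed, as above.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snapshot-testExportExpiredSnapshot",
        "-copy-to", "hdfs://example-namenode:8020/backup/export-test"
    });
    System.exit(rc);
  }
}
```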
2024-12-03T23:30:30,985 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportExpiredSnapshot 2024-12-03T23:30:30,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure2.ProcedureExecutor(1139): Stored pid=188, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-03T23:30:30,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=188 2024-12-03T23:30:30,988 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733268630988"}]},"ts":"1733268630988"} 2024-12-03T23:30:30,990 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=DISABLING in hbase:meta 2024-12-03T23:30:30,990 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testtb-testExportExpiredSnapshot to state=DISABLING 2024-12-03T23:30:30,990 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=189, ppid=188, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportExpiredSnapshot}] 2024-12-03T23:30:30,992 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=190, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=b64cb6d11302c793c7b718a6ff0bad5d, UNASSIGN}, {pid=191, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=1fbf27fd8fbbc762a742db3946a4b62e, UNASSIGN}] 2024-12-03T23:30:30,993 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=190, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=b64cb6d11302c793c7b718a6ff0bad5d, UNASSIGN 2024-12-03T23:30:30,993 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=191, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=1fbf27fd8fbbc762a742db3946a4b62e, UNASSIGN 2024-12-03T23:30:30,993 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=190 updating hbase:meta row=b64cb6d11302c793c7b718a6ff0bad5d, regionState=CLOSING, regionLocation=ed8014902436,37351,1733268370600 2024-12-03T23:30:30,994 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=191 updating hbase:meta row=1fbf27fd8fbbc762a742db3946a4b62e, regionState=CLOSING, regionLocation=ed8014902436,41121,1733268370525 2024-12-03T23:30:30,995 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=190, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=b64cb6d11302c793c7b718a6ff0bad5d, UNASSIGN because future has completed 2024-12-03T23:30:30,995 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-03T23:30:30,996 INFO [PEWorker-4 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=192, ppid=190, state=RUNNABLE, hasLock=false; CloseRegionProcedure b64cb6d11302c793c7b718a6ff0bad5d, server=ed8014902436,37351,1733268370600}] 2024-12-03T23:30:30,996 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=191, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=1fbf27fd8fbbc762a742db3946a4b62e, UNASSIGN because future has completed 2024-12-03T23:30:30,996 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-03T23:30:30,996 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=193, ppid=191, state=RUNNABLE, hasLock=false; CloseRegionProcedure 1fbf27fd8fbbc762a742db3946a4b62e, server=ed8014902436,41121,1733268370525}] 2024-12-03T23:30:31,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=188 2024-12-03T23:30:31,148 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] handler.UnassignRegionHandler(122): Close b64cb6d11302c793c7b718a6ff0bad5d 2024-12-03T23:30:31,148 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-03T23:30:31,148 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1722): Closing b64cb6d11302c793c7b718a6ff0bad5d, disabling compactions & flushes 2024-12-03T23:30:31,148 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,,1733268617786.b64cb6d11302c793c7b718a6ff0bad5d. 2024-12-03T23:30:31,148 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,,1733268617786.b64cb6d11302c793c7b718a6ff0bad5d. 2024-12-03T23:30:31,148 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,,1733268617786.b64cb6d11302c793c7b718a6ff0bad5d. after waiting 0 ms 2024-12-03T23:30:31,148 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,,1733268617786.b64cb6d11302c793c7b718a6ff0bad5d. 
2024-12-03T23:30:31,149 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] handler.UnassignRegionHandler(122): Close 1fbf27fd8fbbc762a742db3946a4b62e 2024-12-03T23:30:31,149 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-03T23:30:31,149 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1722): Closing 1fbf27fd8fbbc762a742db3946a4b62e, disabling compactions & flushes 2024-12-03T23:30:31,149 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,1,1733268617786.1fbf27fd8fbbc762a742db3946a4b62e. 2024-12-03T23:30:31,149 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,1,1733268617786.1fbf27fd8fbbc762a742db3946a4b62e. 2024-12-03T23:30:31,149 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,1,1733268617786.1fbf27fd8fbbc762a742db3946a4b62e. after waiting 0 ms 2024-12-03T23:30:31,149 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,1,1733268617786.1fbf27fd8fbbc762a742db3946a4b62e. 2024-12-03T23:30:31,152 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportExpiredSnapshot/b64cb6d11302c793c7b718a6ff0bad5d/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-03T23:30:31,152 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportExpiredSnapshot/1fbf27fd8fbbc762a742db3946a4b62e/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-03T23:30:31,153 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-03T23:30:31,153 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-03T23:30:31,153 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,,1733268617786.b64cb6d11302c793c7b718a6ff0bad5d. 2024-12-03T23:30:31,153 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,1,1733268617786.1fbf27fd8fbbc762a742db3946a4b62e. 
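The DisableTableProcedure (pid=188) and the two region closes above are the server-side effect of a client disable request. A minimal client-side sketch, assuming an HBase client configuration is available on the classpath:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableSketch {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportExpiredSnapshot");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      if (admin.isTableEnabled(table)) {
        // Blocks until the master's DisableTableProcedure (including region closes) finishes.
        admin.disableTable(table);
      }
    }
  }
}
```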
2024-12-03T23:30:31,153 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1676): Region close journal for 1fbf27fd8fbbc762a742db3946a4b62e: Waiting for close lock at 1733268631149Running coprocessor pre-close hooks at 1733268631149Disabling compacts and flushes for region at 1733268631149Disabling writes for close at 1733268631149Writing region close event to WAL at 1733268631150 (+1 ms)Running coprocessor post-close hooks at 1733268631153 (+3 ms)Closed at 1733268631153 2024-12-03T23:30:31,153 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1676): Region close journal for b64cb6d11302c793c7b718a6ff0bad5d: Waiting for close lock at 1733268631148Running coprocessor pre-close hooks at 1733268631148Disabling compacts and flushes for region at 1733268631148Disabling writes for close at 1733268631148Writing region close event to WAL at 1733268631149 (+1 ms)Running coprocessor post-close hooks at 1733268631153 (+4 ms)Closed at 1733268631153 2024-12-03T23:30:31,154 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] handler.UnassignRegionHandler(157): Closed b64cb6d11302c793c7b718a6ff0bad5d 2024-12-03T23:30:31,155 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=190 updating hbase:meta row=b64cb6d11302c793c7b718a6ff0bad5d, regionState=CLOSED 2024-12-03T23:30:31,155 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] handler.UnassignRegionHandler(157): Closed 1fbf27fd8fbbc762a742db3946a4b62e 2024-12-03T23:30:31,155 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=191 updating hbase:meta row=1fbf27fd8fbbc762a742db3946a4b62e, regionState=CLOSED 2024-12-03T23:30:31,156 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=192, ppid=190, state=RUNNABLE, hasLock=false; CloseRegionProcedure b64cb6d11302c793c7b718a6ff0bad5d, server=ed8014902436,37351,1733268370600 because future has completed 2024-12-03T23:30:31,157 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=193, ppid=191, state=RUNNABLE, hasLock=false; CloseRegionProcedure 1fbf27fd8fbbc762a742db3946a4b62e, server=ed8014902436,41121,1733268370525 because future has completed 2024-12-03T23:30:31,158 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=192, resume processing ppid=190 2024-12-03T23:30:31,158 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=192, ppid=190, state=SUCCESS, hasLock=false; CloseRegionProcedure b64cb6d11302c793c7b718a6ff0bad5d, server=ed8014902436,37351,1733268370600 in 161 msec 2024-12-03T23:30:31,159 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=190, ppid=189, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=b64cb6d11302c793c7b718a6ff0bad5d, UNASSIGN in 166 msec 2024-12-03T23:30:31,159 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=193, resume processing ppid=191 2024-12-03T23:30:31,159 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=193, ppid=191, state=SUCCESS, hasLock=false; CloseRegionProcedure 1fbf27fd8fbbc762a742db3946a4b62e, server=ed8014902436,41121,1733268370525 in 161 msec 2024-12-03T23:30:31,160 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): 
Finished subprocedure pid=191, resume processing ppid=189 2024-12-03T23:30:31,160 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=191, ppid=189, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=1fbf27fd8fbbc762a742db3946a4b62e, UNASSIGN in 167 msec 2024-12-03T23:30:31,162 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=189, resume processing ppid=188 2024-12-03T23:30:31,162 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=189, ppid=188, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportExpiredSnapshot in 171 msec 2024-12-03T23:30:31,163 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733268631163"}]},"ts":"1733268631163"} 2024-12-03T23:30:31,164 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=DISABLED in hbase:meta 2024-12-03T23:30:31,164 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testExportExpiredSnapshot to state=DISABLED 2024-12-03T23:30:31,166 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=188, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportExpiredSnapshot in 180 msec 2024-12-03T23:30:31,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=188 2024-12-03T23:30:31,309 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-12-03T23:30:31,310 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportExpiredSnapshot 2024-12-03T23:30:31,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure2.ProcedureExecutor(1139): Stored pid=194, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-03T23:30:31,311 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=194, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-03T23:30:31,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportExpiredSnapshot 2024-12-03T23:30:31,312 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=194, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-03T23:30:31,314 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37351 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportExpiredSnapshot 2024-12-03T23:30:31,315 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportExpiredSnapshot/b64cb6d11302c793c7b718a6ff0bad5d 2024-12-03T23:30:31,315 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(131): ARCHIVING 
hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportExpiredSnapshot/1fbf27fd8fbbc762a742db3946a4b62e 2024-12-03T23:30:31,317 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportExpiredSnapshot/1fbf27fd8fbbc762a742db3946a4b62e/cf, FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportExpiredSnapshot/1fbf27fd8fbbc762a742db3946a4b62e/recovered.edits] 2024-12-03T23:30:31,317 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportExpiredSnapshot/b64cb6d11302c793c7b718a6ff0bad5d/cf, FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportExpiredSnapshot/b64cb6d11302c793c7b718a6ff0bad5d/recovered.edits] 2024-12-03T23:30:31,332 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportExpiredSnapshot/1fbf27fd8fbbc762a742db3946a4b62e/cf/ce845c92760343d7a3bf7b566b4c2b76 to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testtb-testExportExpiredSnapshot/1fbf27fd8fbbc762a742db3946a4b62e/cf/ce845c92760343d7a3bf7b566b4c2b76 2024-12-03T23:30:31,332 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportExpiredSnapshot/b64cb6d11302c793c7b718a6ff0bad5d/cf/7b835e2b9b044e4e8ff4a112ed4e6f09 to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testtb-testExportExpiredSnapshot/b64cb6d11302c793c7b718a6ff0bad5d/cf/7b835e2b9b044e4e8ff4a112ed4e6f09 2024-12-03T23:30:31,335 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportExpiredSnapshot/1fbf27fd8fbbc762a742db3946a4b62e/recovered.edits/9.seqid to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testtb-testExportExpiredSnapshot/1fbf27fd8fbbc762a742db3946a4b62e/recovered.edits/9.seqid 2024-12-03T23:30:31,335 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportExpiredSnapshot/b64cb6d11302c793c7b718a6ff0bad5d/recovered.edits/9.seqid to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testtb-testExportExpiredSnapshot/b64cb6d11302c793c7b718a6ff0bad5d/recovered.edits/9.seqid 2024-12-03T23:30:31,335 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportExpiredSnapshot/1fbf27fd8fbbc762a742db3946a4b62e 2024-12-03T23:30:31,345 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(610): Deleted 
hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportExpiredSnapshot/b64cb6d11302c793c7b718a6ff0bad5d 2024-12-03T23:30:31,345 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportExpiredSnapshot regions 2024-12-03T23:30:31,346 DEBUG [PEWorker-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f 2024-12-03T23:30:31,346 DEBUG [PEWorker-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf] 2024-12-03T23:30:31,350 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/c4ca4238a0b923820dcc509a6f75849b20241203cb38e45ec6884449b866d663b3746c30_1fbf27fd8fbbc762a742db3946a4b62e to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/c4ca4238a0b923820dcc509a6f75849b20241203cb38e45ec6884449b866d663b3746c30_1fbf27fd8fbbc762a742db3946a4b62e 2024-12-03T23:30:31,351 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/d41d8cd98f00b204e9800998ecf8427e20241203cbe6813865c34cd78ee8d3b8550111d0_b64cb6d11302c793c7b718a6ff0bad5d to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/d41d8cd98f00b204e9800998ecf8427e20241203cbe6813865c34cd78ee8d3b8550111d0_b64cb6d11302c793c7b718a6ff0bad5d 2024-12-03T23:30:31,351 DEBUG [PEWorker-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f 2024-12-03T23:30:31,354 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=194, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-03T23:30:31,356 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportExpiredSnapshot from hbase:meta 2024-12-03T23:30:31,377 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-03T23:30:31,377 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-03T23:30:31,377 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Received 
ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-03T23:30:31,377 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-03T23:30:31,377 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-03T23:30:31,377 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-03T23:30:31,378 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-03T23:30:31,378 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-03T23:30:31,378 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportExpiredSnapshot' descriptor. 2024-12-03T23:30:31,379 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=194, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-03T23:30:31,380 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportExpiredSnapshot' from region states. 2024-12-03T23:30:31,380 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot,,1733268617786.b64cb6d11302c793c7b718a6ff0bad5d.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733268631380"}]},"ts":"9223372036854775807"} 2024-12-03T23:30:31,380 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot,1,1733268617786.1fbf27fd8fbbc762a742db3946a4b62e.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733268631380"}]},"ts":"9223372036854775807"} 2024-12-03T23:30:31,382 INFO [PEWorker-3 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-03T23:30:31,382 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => b64cb6d11302c793c7b718a6ff0bad5d, NAME => 'testtb-testExportExpiredSnapshot,,1733268617786.b64cb6d11302c793c7b718a6ff0bad5d.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 1fbf27fd8fbbc762a742db3946a4b62e, NAME => 'testtb-testExportExpiredSnapshot,1,1733268617786.1fbf27fd8fbbc762a742db3946a4b62e.', STARTKEY => '1', ENDKEY => ''}] 2024-12-03T23:30:31,382 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportExpiredSnapshot' as deleted. 
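The DeleteTableProcedure (pid=194) archives the region and MOB files and then removes the table from hbase:meta; the three snapshot deletions follow a little further below. A hedged sketch of the corresponding client calls, with table and snapshot names copied from the log:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableAndSnapshotsSketch {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportExpiredSnapshot");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // The table must already be disabled, as it is at this point in the log.
      admin.deleteTable(table);
      // Snapshot cleanup; the same deletions are logged further below.
      admin.deleteSnapshot("emptySnaptb0-testExportExpiredSnapshot");
      admin.deleteSnapshot("snapshot-testExportExpiredSnapshot");
      admin.deleteSnapshot("snaptb0-testExportExpiredSnapshot");
    }
  }
}
```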
2024-12-03T23:30:31,382 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733268631382"}]},"ts":"9223372036854775807"} 2024-12-03T23:30:31,384 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportExpiredSnapshot state from META 2024-12-03T23:30:31,385 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=194, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-03T23:30:31,385 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-03T23:30:31,385 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-03T23:30:31,385 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-03T23:30:31,385 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:30:31,385 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:30:31,385 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:30:31,385 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-03T23:30:31,385 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:30:31,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=194 2024-12-03T23:30:31,386 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-03T23:30:31,386 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-03T23:30:31,386 DEBUG 
[zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-03T23:30:31,386 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-03T23:30:31,387 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=194, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportExpiredSnapshot in 75 msec 2024-12-03T23:30:31,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=194 2024-12-03T23:30:31,490 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportExpiredSnapshot 2024-12-03T23:30:31,490 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-12-03T23:30:31,498 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportExpiredSnapshot" type: DISABLED 2024-12-03T23:30:31,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportExpiredSnapshot 2024-12-03T23:30:31,501 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snapshot-testExportExpiredSnapshot" type: DISABLED 2024-12-03T23:30:31,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotManager(381): Deleting snapshot: snapshot-testExportExpiredSnapshot 2024-12-03T23:30:31,504 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportExpiredSnapshot" type: DISABLED 2024-12-03T23:30:31,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportExpiredSnapshot 2024-12-03T23:30:31,522 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportExpiredSnapshot Thread=807 (was 816), OpenFileDescriptor=793 (was 816), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=613 (was 740), ProcessCount=11 (was 23), AvailableMemoryMB=2943 (was 1912) - AvailableMemoryMB LEAK? 
- 2024-12-03T23:30:31,522 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=807 is superior to 500 2024-12-03T23:30:31,537 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testEmptyExportFileSystemState Thread=807, OpenFileDescriptor=793, MaxFileDescriptor=1048576, SystemLoadAverage=613, ProcessCount=11, AvailableMemoryMB=2942 2024-12-03T23:30:31,537 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=807 is superior to 500 2024-12-03T23:30:31,538 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-03T23:30:31,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure2.ProcedureExecutor(1139): Stored pid=195, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-03T23:30:31,540 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_PRE_OPERATION 2024-12-03T23:30:31,540 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testEmptyExportFileSystemState" procId is: 195 2024-12-03T23:30:31,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=195 2024-12-03T23:30:31,541 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-03T23:30:31,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742271_1447 (size=448) 2024-12-03T23:30:31,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742271_1447 (size=448) 2024-12-03T23:30:31,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742271_1447 (size=448) 2024-12-03T23:30:31,552 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 7d2ccee371d7ad2ad1f5e8638f243bdc, NAME => 'testtb-testEmptyExportFileSystemState,,1733268631538.7d2ccee371d7ad2ad1f5e8638f243bdc.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', 
MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b 2024-12-03T23:30:31,552 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 2aec2ee7a6572023bfa117fbcbc896c8, NAME => 'testtb-testEmptyExportFileSystemState,1,1733268631538.2aec2ee7a6572023bfa117fbcbc896c8.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b 2024-12-03T23:30:31,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742272_1448 (size=73) 2024-12-03T23:30:31,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742273_1449 (size=73) 2024-12-03T23:30:31,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742272_1448 (size=73) 2024-12-03T23:30:31,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742273_1449 (size=73) 2024-12-03T23:30:31,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742272_1448 (size=73) 2024-12-03T23:30:31,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742273_1449 (size=73) 2024-12-03T23:30:31,558 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,1,1733268631538.2aec2ee7a6572023bfa117fbcbc896c8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T23:30:31,558 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,,1733268631538.7d2ccee371d7ad2ad1f5e8638f243bdc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T23:30:31,558 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1722): Closing 2aec2ee7a6572023bfa117fbcbc896c8, disabling compactions & flushes 2024-12-03T23:30:31,558 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,1,1733268631538.2aec2ee7a6572023bfa117fbcbc896c8. 
2024-12-03T23:30:31,558 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1722): Closing 7d2ccee371d7ad2ad1f5e8638f243bdc, disabling compactions & flushes 2024-12-03T23:30:31,558 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,1,1733268631538.2aec2ee7a6572023bfa117fbcbc896c8. 2024-12-03T23:30:31,558 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,,1733268631538.7d2ccee371d7ad2ad1f5e8638f243bdc. 2024-12-03T23:30:31,558 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,1,1733268631538.2aec2ee7a6572023bfa117fbcbc896c8. after waiting 0 ms 2024-12-03T23:30:31,559 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,,1733268631538.7d2ccee371d7ad2ad1f5e8638f243bdc. 2024-12-03T23:30:31,559 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,1,1733268631538.2aec2ee7a6572023bfa117fbcbc896c8. 2024-12-03T23:30:31,559 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,,1733268631538.7d2ccee371d7ad2ad1f5e8638f243bdc. after waiting 0 ms 2024-12-03T23:30:31,559 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,1,1733268631538.2aec2ee7a6572023bfa117fbcbc896c8. 2024-12-03T23:30:31,559 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,,1733268631538.7d2ccee371d7ad2ad1f5e8638f243bdc. 2024-12-03T23:30:31,559 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,,1733268631538.7d2ccee371d7ad2ad1f5e8638f243bdc. 
2024-12-03T23:30:31,559 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1676): Region close journal for 2aec2ee7a6572023bfa117fbcbc896c8: Waiting for close lock at 1733268631558Disabling compacts and flushes for region at 1733268631558Disabling writes for close at 1733268631559 (+1 ms)Writing region close event to WAL at 1733268631559Closed at 1733268631559 2024-12-03T23:30:31,559 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1676): Region close journal for 7d2ccee371d7ad2ad1f5e8638f243bdc: Waiting for close lock at 1733268631558Disabling compacts and flushes for region at 1733268631558Disabling writes for close at 1733268631559 (+1 ms)Writing region close event to WAL at 1733268631559Closed at 1733268631559 2024-12-03T23:30:31,560 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_ADD_TO_META 2024-12-03T23:30:31,560 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testEmptyExportFileSystemState,1,1733268631538.2aec2ee7a6572023bfa117fbcbc896c8.","families":{"info":[{"qualifier":"regioninfo","vlen":72,"tag":[],"timestamp":"1733268631560"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733268631560"}]},"ts":"1733268631560"} 2024-12-03T23:30:31,560 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testEmptyExportFileSystemState,,1733268631538.7d2ccee371d7ad2ad1f5e8638f243bdc.","families":{"info":[{"qualifier":"regioninfo","vlen":72,"tag":[],"timestamp":"1733268631560"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733268631560"}]},"ts":"1733268631560"} 2024-12-03T23:30:31,562 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
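The CreateTableProcedure (pid=195) above writes the FS layout and meta rows for a MOB-enabled table with a single 'cf' family, MOB_THRESHOLD=0, one version, and a split at '1'. A sketch of building an equivalent descriptor with the HBase 2.x+ client builders; the store-file-tracker property mirrors the METADATA entry in the log, and the cluster configuration is assumed to be on the classpath:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTableSketch {
  public static void main(String[] args) throws Exception {
    // Column family 'cf': MOB enabled, MOB_THRESHOLD=0, single version, matching the descriptor above.
    ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
        .setMobEnabled(true)
        .setMobThreshold(0L)
        .setMaxVersions(1)
        .build();
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("testtb-testEmptyExportFileSystemState"))
        .setColumnFamily(cf)
        // Mirrors the METADATA entry {'hbase.store.file-tracker.impl' => 'DEFAULT'} seen in the log.
        .setValue("hbase.store.file-tracker.impl", "DEFAULT")
        .build();
    byte[][] splitKeys = { Bytes.toBytes("1") };  // one split key => the two regions ('' and '1') seen above
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.createTable(td, splitKeys);
    }
  }
}
```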
2024-12-03T23:30:31,562 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-03T23:30:31,563 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733268631562"}]},"ts":"1733268631562"} 2024-12-03T23:30:31,564 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=ENABLING in hbase:meta 2024-12-03T23:30:31,564 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {ed8014902436=0} racks are {/default-rack=0} 2024-12-03T23:30:31,565 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-03T23:30:31,565 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-03T23:30:31,565 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-03T23:30:31,565 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-03T23:30:31,565 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-03T23:30:31,565 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-03T23:30:31,565 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-03T23:30:31,565 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-03T23:30:31,565 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-03T23:30:31,565 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-03T23:30:31,565 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=196, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=7d2ccee371d7ad2ad1f5e8638f243bdc, ASSIGN}, {pid=197, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=2aec2ee7a6572023bfa117fbcbc896c8, ASSIGN}] 2024-12-03T23:30:31,566 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=197, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=2aec2ee7a6572023bfa117fbcbc896c8, ASSIGN 2024-12-03T23:30:31,566 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=196, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=7d2ccee371d7ad2ad1f5e8638f243bdc, ASSIGN 2024-12-03T23:30:31,567 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=197, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=2aec2ee7a6572023bfa117fbcbc896c8, ASSIGN; state=OFFLINE, location=ed8014902436,39085,1733268370401; forceNewPlan=false, retain=false 2024-12-03T23:30:31,567 INFO [PEWorker-4 {}] 
assignment.TransitRegionStateProcedure(269): Starting pid=196, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=7d2ccee371d7ad2ad1f5e8638f243bdc, ASSIGN; state=OFFLINE, location=ed8014902436,37351,1733268370600; forceNewPlan=false, retain=false 2024-12-03T23:30:31,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=195 2024-12-03T23:30:31,717 INFO [ed8014902436:43753 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-03T23:30:31,718 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=196 updating hbase:meta row=7d2ccee371d7ad2ad1f5e8638f243bdc, regionState=OPENING, regionLocation=ed8014902436,37351,1733268370600 2024-12-03T23:30:31,718 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=197 updating hbase:meta row=2aec2ee7a6572023bfa117fbcbc896c8, regionState=OPENING, regionLocation=ed8014902436,39085,1733268370401 2024-12-03T23:30:31,719 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=196, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=7d2ccee371d7ad2ad1f5e8638f243bdc, ASSIGN because future has completed 2024-12-03T23:30:31,720 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=198, ppid=196, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7d2ccee371d7ad2ad1f5e8638f243bdc, server=ed8014902436,37351,1733268370600}] 2024-12-03T23:30:31,720 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=197, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=2aec2ee7a6572023bfa117fbcbc896c8, ASSIGN because future has completed 2024-12-03T23:30:31,721 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=199, ppid=197, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2aec2ee7a6572023bfa117fbcbc896c8, server=ed8014902436,39085,1733268370401}] 2024-12-03T23:30:31,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=195 2024-12-03T23:30:31,874 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] handler.AssignRegionHandler(132): Open testtb-testEmptyExportFileSystemState,,1733268631538.7d2ccee371d7ad2ad1f5e8638f243bdc. 2024-12-03T23:30:31,874 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(7752): Opening region: {ENCODED => 7d2ccee371d7ad2ad1f5e8638f243bdc, NAME => 'testtb-testEmptyExportFileSystemState,,1733268631538.7d2ccee371d7ad2ad1f5e8638f243bdc.', STARTKEY => '', ENDKEY => '1'} 2024-12-03T23:30:31,875 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testEmptyExportFileSystemState,,1733268631538.7d2ccee371d7ad2ad1f5e8638f243bdc. 
service=AccessControlService 2024-12-03T23:30:31,875 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-03T23:30:31,875 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testEmptyExportFileSystemState 7d2ccee371d7ad2ad1f5e8638f243bdc 2024-12-03T23:30:31,875 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,,1733268631538.7d2ccee371d7ad2ad1f5e8638f243bdc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T23:30:31,875 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(7794): checking encryption for 7d2ccee371d7ad2ad1f5e8638f243bdc 2024-12-03T23:30:31,875 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(7797): checking classloading for 7d2ccee371d7ad2ad1f5e8638f243bdc 2024-12-03T23:30:31,875 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] handler.AssignRegionHandler(132): Open testtb-testEmptyExportFileSystemState,1,1733268631538.2aec2ee7a6572023bfa117fbcbc896c8. 2024-12-03T23:30:31,875 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(7752): Opening region: {ENCODED => 2aec2ee7a6572023bfa117fbcbc896c8, NAME => 'testtb-testEmptyExportFileSystemState,1,1733268631538.2aec2ee7a6572023bfa117fbcbc896c8.', STARTKEY => '1', ENDKEY => ''} 2024-12-03T23:30:31,876 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testEmptyExportFileSystemState,1,1733268631538.2aec2ee7a6572023bfa117fbcbc896c8. service=AccessControlService 2024-12-03T23:30:31,876 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
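Each region registers the AccessControlService coprocessor as it opens, and the ZKPermissionWatcher entries earlier in the log show permission-cache updates for the jenkins user being propagated to every region server. A hedged sketch of issuing such a grant from a client, assuming the AccessControlClient.grant overload that takes a table, user, family/qualifier, and actions; names are illustrative, taken from the permission entries in the log:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantTableAccessSketch {
  public static void main(String[] args) throws Throwable {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
      // Grant the 'jenkins' user all five actions on the table; the resulting ACL update
      // is what the ZKPermissionWatcher cache refreshes above correspond to.
      AccessControlClient.grant(conn,
          TableName.valueOf("testExportExpiredSnapshot"),
          "jenkins",
          null,  // all column families
          null,  // all qualifiers
          Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
          Permission.Action.CREATE, Permission.Action.ADMIN);
    }
  }
}
```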
2024-12-03T23:30:31,876 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testEmptyExportFileSystemState 2aec2ee7a6572023bfa117fbcbc896c8 2024-12-03T23:30:31,876 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,1,1733268631538.2aec2ee7a6572023bfa117fbcbc896c8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T23:30:31,876 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(7794): checking encryption for 2aec2ee7a6572023bfa117fbcbc896c8 2024-12-03T23:30:31,876 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(7797): checking classloading for 2aec2ee7a6572023bfa117fbcbc896c8 2024-12-03T23:30:31,876 INFO [StoreOpener-7d2ccee371d7ad2ad1f5e8638f243bdc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 7d2ccee371d7ad2ad1f5e8638f243bdc 2024-12-03T23:30:31,877 INFO [StoreOpener-2aec2ee7a6572023bfa117fbcbc896c8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 2aec2ee7a6572023bfa117fbcbc896c8 2024-12-03T23:30:31,878 INFO [StoreOpener-2aec2ee7a6572023bfa117fbcbc896c8-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2aec2ee7a6572023bfa117fbcbc896c8 columnFamilyName cf 2024-12-03T23:30:31,878 INFO [StoreOpener-7d2ccee371d7ad2ad1f5e8638f243bdc-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7d2ccee371d7ad2ad1f5e8638f243bdc columnFamilyName cf 2024-12-03T23:30:31,878 DEBUG [StoreOpener-2aec2ee7a6572023bfa117fbcbc896c8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:30:31,878 DEBUG [StoreOpener-7d2ccee371d7ad2ad1f5e8638f243bdc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:30:31,878 INFO [StoreOpener-2aec2ee7a6572023bfa117fbcbc896c8-1 {}] regionserver.HStore(327): Store=2aec2ee7a6572023bfa117fbcbc896c8/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T23:30:31,878 INFO [StoreOpener-7d2ccee371d7ad2ad1f5e8638f243bdc-1 {}] regionserver.HStore(327): Store=7d2ccee371d7ad2ad1f5e8638f243bdc/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T23:30:31,879 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1038): replaying wal for 2aec2ee7a6572023bfa117fbcbc896c8 2024-12-03T23:30:31,879 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1038): replaying wal for 7d2ccee371d7ad2ad1f5e8638f243bdc 2024-12-03T23:30:31,879 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testEmptyExportFileSystemState/2aec2ee7a6572023bfa117fbcbc896c8 2024-12-03T23:30:31,879 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testEmptyExportFileSystemState/7d2ccee371d7ad2ad1f5e8638f243bdc 2024-12-03T23:30:31,879 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testEmptyExportFileSystemState/2aec2ee7a6572023bfa117fbcbc896c8 2024-12-03T23:30:31,880 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testEmptyExportFileSystemState/7d2ccee371d7ad2ad1f5e8638f243bdc 2024-12-03T23:30:31,880 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1048): stopping wal replay for 2aec2ee7a6572023bfa117fbcbc896c8 2024-12-03T23:30:31,880 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1060): Cleaning up temporary data for 2aec2ee7a6572023bfa117fbcbc896c8 2024-12-03T23:30:31,880 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1048): stopping wal replay for 7d2ccee371d7ad2ad1f5e8638f243bdc 2024-12-03T23:30:31,880 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1060): Cleaning up temporary data for 7d2ccee371d7ad2ad1f5e8638f243bdc 2024-12-03T23:30:31,881 DEBUG 
[RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1093): writing seq id for 2aec2ee7a6572023bfa117fbcbc896c8 2024-12-03T23:30:31,881 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1093): writing seq id for 7d2ccee371d7ad2ad1f5e8638f243bdc 2024-12-03T23:30:31,898 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testEmptyExportFileSystemState/7d2ccee371d7ad2ad1f5e8638f243bdc/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T23:30:31,898 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testEmptyExportFileSystemState/2aec2ee7a6572023bfa117fbcbc896c8/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T23:30:31,898 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1114): Opened 2aec2ee7a6572023bfa117fbcbc896c8; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71926688, jitterRate=0.07179117202758789}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-03T23:30:31,898 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1114): Opened 7d2ccee371d7ad2ad1f5e8638f243bdc; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69181596, jitterRate=0.03088611364364624}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-03T23:30:31,899 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 7d2ccee371d7ad2ad1f5e8638f243bdc 2024-12-03T23:30:31,899 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 2aec2ee7a6572023bfa117fbcbc896c8 2024-12-03T23:30:31,899 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1006): Region open journal for 7d2ccee371d7ad2ad1f5e8638f243bdc: Running coprocessor pre-open hook at 1733268631875Writing region info on filesystem at 1733268631875Initializing all the Stores at 1733268631876 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733268631876Cleaning up temporary data from old regions at 1733268631880 (+4 ms)Running coprocessor post-open hooks at 1733268631899 (+19 ms)Region opened successfully at 1733268631899 2024-12-03T23:30:31,899 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1006): Region open journal for 2aec2ee7a6572023bfa117fbcbc896c8: Running coprocessor 
pre-open hook at 1733268631876Writing region info on filesystem at 1733268631876Initializing all the Stores at 1733268631877 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733268631877Cleaning up temporary data from old regions at 1733268631880 (+3 ms)Running coprocessor post-open hooks at 1733268631899 (+19 ms)Region opened successfully at 1733268631899 2024-12-03T23:30:31,900 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testEmptyExportFileSystemState,1,1733268631538.2aec2ee7a6572023bfa117fbcbc896c8., pid=199, masterSystemTime=1733268631872 2024-12-03T23:30:31,900 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testEmptyExportFileSystemState,,1733268631538.7d2ccee371d7ad2ad1f5e8638f243bdc., pid=198, masterSystemTime=1733268631871 2024-12-03T23:30:31,901 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testEmptyExportFileSystemState,,1733268631538.7d2ccee371d7ad2ad1f5e8638f243bdc. 2024-12-03T23:30:31,901 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] handler.AssignRegionHandler(153): Opened testtb-testEmptyExportFileSystemState,,1733268631538.7d2ccee371d7ad2ad1f5e8638f243bdc. 2024-12-03T23:30:31,903 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=196 updating hbase:meta row=7d2ccee371d7ad2ad1f5e8638f243bdc, regionState=OPEN, openSeqNum=2, regionLocation=ed8014902436,37351,1733268370600 2024-12-03T23:30:31,903 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testEmptyExportFileSystemState,1,1733268631538.2aec2ee7a6572023bfa117fbcbc896c8. 2024-12-03T23:30:31,903 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] handler.AssignRegionHandler(153): Opened testtb-testEmptyExportFileSystemState,1,1733268631538.2aec2ee7a6572023bfa117fbcbc896c8. 
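The CompactionConfiguration lines above (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.200000, off-peak ratio 5.000000, major period 604800000, jitter 0.5) echo the stock HBase compaction defaults for the new 'cf' stores. A sketch of the standard configuration keys behind those numbers, expressed as programmatic overrides; the values shown are the defaults printed in the log, not something this test sets explicitly:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionConfigSketch {
  public static Configuration compactionDefaults() {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);         // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);        // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);  // file selection ratio
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
    conf.setLong("hbase.hregion.majorcompaction", 604800000L);  // 7 days in ms
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);
    return conf;
  }
}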
2024-12-03T23:30:31,903 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=197 updating hbase:meta row=2aec2ee7a6572023bfa117fbcbc896c8, regionState=OPEN, openSeqNum=2, regionLocation=ed8014902436,39085,1733268370401 2024-12-03T23:30:31,904 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=198, ppid=196, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7d2ccee371d7ad2ad1f5e8638f243bdc, server=ed8014902436,37351,1733268370600 because future has completed 2024-12-03T23:30:31,905 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=199, ppid=197, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2aec2ee7a6572023bfa117fbcbc896c8, server=ed8014902436,39085,1733268370401 because future has completed 2024-12-03T23:30:31,906 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=198, resume processing ppid=196 2024-12-03T23:30:31,906 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=198, ppid=196, state=SUCCESS, hasLock=false; OpenRegionProcedure 7d2ccee371d7ad2ad1f5e8638f243bdc, server=ed8014902436,37351,1733268370600 in 185 msec 2024-12-03T23:30:31,907 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=199, resume processing ppid=197 2024-12-03T23:30:31,907 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=199, ppid=197, state=SUCCESS, hasLock=false; OpenRegionProcedure 2aec2ee7a6572023bfa117fbcbc896c8, server=ed8014902436,39085,1733268370401 in 185 msec 2024-12-03T23:30:31,907 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=196, ppid=195, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=7d2ccee371d7ad2ad1f5e8638f243bdc, ASSIGN in 341 msec 2024-12-03T23:30:31,908 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=197, resume processing ppid=195 2024-12-03T23:30:31,908 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=197, ppid=195, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=2aec2ee7a6572023bfa117fbcbc896c8, ASSIGN in 342 msec 2024-12-03T23:30:31,909 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-03T23:30:31,909 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733268631909"}]},"ts":"1733268631909"} 2024-12-03T23:30:31,910 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=ENABLED in hbase:meta 2024-12-03T23:30:31,911 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_POST_OPERATION 2024-12-03T23:30:31,911 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testEmptyExportFileSystemState jenkins: RWXCA 2024-12-03T23:30:31,913 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37351 {}] access.PermissionStorage(613): Read acl: 
entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-12-03T23:30:31,951 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:30:31,951 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:30:31,951 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:30:31,951 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:30:31,960 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-03T23:30:31,960 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-03T23:30:31,960 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-03T23:30:31,960 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-03T23:30:31,961 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-03T23:30:31,961 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-03T23:30:31,961 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-03T23:30:31,961 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-03T23:30:31,962 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=195, 
state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testEmptyExportFileSystemState in 421 msec 2024-12-03T23:30:32,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=195 2024-12-03T23:30:32,169 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-12-03T23:30:32,169 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-03T23:30:32,172 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testEmptyExportFileSystemState 2024-12-03T23:30:32,172 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testEmptyExportFileSystemState,,1733268631538.7d2ccee371d7ad2ad1f5e8638f243bdc. 2024-12-03T23:30:32,172 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-03T23:30:32,174 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-03T23:30:32,178 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-03T23:30:32,182 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-03T23:30:32,185 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-03T23:30:32,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733268632185 (current time:1733268632185). 
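pid=195 is the CreateTableProcedure that just finished: testtb-testEmptyExportFileSystemState ends up with two regions split at row key '1' and a single MOB-enabled family 'cf' (see the {NAME => 'cf', IS_MOB => 'true', MOB_THRESHOLD => '0', VERSIONS => '1'} descriptor in the region open journal). A hedged client-side sketch of an equivalent table creation; the builder calls mirror the logged descriptor, but they are an illustration rather than the test utility's actual code:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  public static void main(String[] args) throws Exception {
    TableName tn = TableName.valueOf("testtb-testEmptyExportFileSystemState");
    try (Connection conn = ConnectionFactory.createConnection();
         Admin admin = conn.getAdmin()) {
      // One MOB-enabled family 'cf' with a single version, as in the journal.
      ColumnFamilyDescriptor cf =
          ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMobEnabled(true)
              .setMobThreshold(0L)
              .setMaxVersions(1)
              .build();
      // Pre-split at row key '1' so the table starts with the two regions
      // ('', '1') and ('1', '') opened by pids 198 and 199 above.
      byte[][] splitKeys = new byte[][] { Bytes.toBytes("1") };
      admin.createTable(
          TableDescriptorBuilder.newBuilder(tn).setColumnFamily(cf).build(),
          splitKeys);
    }
  }
}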
2024-12-03T23:30:32,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-03T23:30:32,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testEmptyExportFileSystemState VERSION not specified, setting to 2 2024-12-03T23:30:32,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-03T23:30:32,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@57963e7f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:30:32,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.ClusterIdFetcher(90): Going to request ed8014902436,43753,-1 for getting cluster id 2024-12-03T23:30:32,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T23:30:32,186 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '365bf15c-9cbe-4838-a767-ac7274dcdbe4' 2024-12-03T23:30:32,186 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T23:30:32,187 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "365bf15c-9cbe-4838-a767-ac7274dcdbe4" 2024-12-03T23:30:32,187 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5de89e8e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:30:32,187 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [ed8014902436,43753,-1] 2024-12-03T23:30:32,187 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T23:30:32,187 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:30:32,187 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57652, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T23:30:32,188 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1e47a3c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:30:32,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T23:30:32,189 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ed8014902436,37351,1733268370600, seqNum=-1] 2024-12-03T23:30:32,189 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T23:30:32,190 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47082, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T23:30:32,191 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753. 2024-12-03T23:30:32,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-03T23:30:32,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:30:32,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:30:32,191 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
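The connection open/close churn and the call stack above come from SnapshotDescriptionUtils.isSecurityAvailable, which the master invokes while validating the snapshot request; it amounts to checking whether the hbase:acl table exists. A rough client-side equivalent, assuming the check really is a plain tableExists (the helper name below is illustrative, not the master's code path):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class SecurityAvailableSketch {
  // Roughly what isSecurityAvailable decides: is ACL storage present?
  public static boolean aclTablePresent() throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Admin admin = conn.getAdmin()) {
      return admin.tableExists(TableName.valueOf("hbase:acl"));
    }
  }
}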
2024-12-03T23:30:32,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1281d88a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:30:32,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.ClusterIdFetcher(90): Going to request ed8014902436,43753,-1 for getting cluster id 2024-12-03T23:30:32,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T23:30:32,192 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '365bf15c-9cbe-4838-a767-ac7274dcdbe4' 2024-12-03T23:30:32,192 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T23:30:32,192 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "365bf15c-9cbe-4838-a767-ac7274dcdbe4" 2024-12-03T23:30:32,192 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2bf6f8a3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:30:32,192 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [ed8014902436,43753,-1] 2024-12-03T23:30:32,193 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T23:30:32,193 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:30:32,193 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57674, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T23:30:32,193 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5d177c4e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:30:32,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T23:30:32,194 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ed8014902436,37351,1733268370600, seqNum=-1] 2024-12-03T23:30:32,195 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T23:30:32,195 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47086, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-03T23:30:32,196 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testEmptyExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1733268373530.94ece278d222af9bae70064026dc24ae., hostname=ed8014902436,37351,1733268370600, seqNum=2] 2024-12-03T23:30:32,198 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753. 2024-12-03T23:30:32,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor256.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-03T23:30:32,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:30:32,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:30:32,198 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
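This second stack trace shows writeAclToSnapshotDescription reading the table's permissions (PermissionStorage.getTablePermissions) so they can be embedded in the snapshot; the "jenkins: RWXCA" entry written when the table was created is what it finds. A hedged sketch of reading those permissions from a client via the public AccessControlClient helper (not the internal path the master uses here):

import java.util.List;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.UserPermission;

public class ReadAclSketch {
  public static void main(String[] args) throws Throwable {
    try (Connection conn = ConnectionFactory.createConnection()) {
      // Lists entries like "jenkins: RWXCA" for the matching table(s).
      List<UserPermission> perms = AccessControlClient.getUserPermissions(
          conn, "testtb-testEmptyExportFileSystemState");
      perms.forEach(System.out::println);
    }
  }
}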
2024-12-03T23:30:32,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] access.PermissionStorage(613): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-12-03T23:30:32,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-03T23:30:32,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure2.ProcedureExecutor(1139): Stored pid=200, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-03T23:30:32,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 200 2024-12-03T23:30:32,200 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-03T23:30:32,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=200 2024-12-03T23:30:32,201 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-03T23:30:32,203 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-03T23:30:32,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742274_1450 (size=185) 2024-12-03T23:30:32,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742274_1450 (size=185) 2024-12-03T23:30:32,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742274_1450 (size=185) 2024-12-03T23:30:32,209 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-03T23:30:32,209 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=201, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7d2ccee371d7ad2ad1f5e8638f243bdc}, {pid=202, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2aec2ee7a6572023bfa117fbcbc896c8}] 
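pid=200 is the SnapshotProcedure for emptySnaptb0-testEmptyExportFileSystemState, with one SnapshotRegionProcedure per region (pids 201 and 202). On the client side the whole sequence is driven by a single Admin.snapshot call; a minimal sketch, assuming the synchronous API the test presumably uses:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class SnapshotRequestSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Admin admin = conn.getAdmin()) {
      // Produces the "{ ss=emptySnaptb0-... type=FLUSH ttl=0 }" request logged
      // above; FLUSH snapshots flush each region before capturing references.
      admin.snapshot("emptySnaptb0-testEmptyExportFileSystemState",
          TableName.valueOf("testtb-testEmptyExportFileSystemState"),
          SnapshotType.FLUSH);
    }
  }
}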
2024-12-03T23:30:32,212 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=202, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2aec2ee7a6572023bfa117fbcbc896c8 2024-12-03T23:30:32,214 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=201, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7d2ccee371d7ad2ad1f5e8638f243bdc 2024-12-03T23:30:32,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=200 2024-12-03T23:30:32,364 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39085 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=202 2024-12-03T23:30:32,364 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,1,1733268631538.2aec2ee7a6572023bfa117fbcbc896c8. 2024-12-03T23:30:32,365 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.HRegion(2603): Flush status journal for 2aec2ee7a6572023bfa117fbcbc896c8: 2024-12-03T23:30:32,365 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,1,1733268631538.2aec2ee7a6572023bfa117fbcbc896c8. for emptySnaptb0-testEmptyExportFileSystemState completed. 2024-12-03T23:30:32,365 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,1,1733268631538.2aec2ee7a6572023bfa117fbcbc896c8.' region-info for snapshot=emptySnaptb0-testEmptyExportFileSystemState 2024-12-03T23:30:32,365 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-03T23:30:32,365 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-03T23:30:32,366 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37351 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=201 2024-12-03T23:30:32,366 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,,1733268631538.7d2ccee371d7ad2ad1f5e8638f243bdc. 2024-12-03T23:30:32,367 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.HRegion(2603): Flush status journal for 7d2ccee371d7ad2ad1f5e8638f243bdc: 2024-12-03T23:30:32,367 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,,1733268631538.7d2ccee371d7ad2ad1f5e8638f243bdc. for emptySnaptb0-testEmptyExportFileSystemState completed. 
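The recurring "Checking to see if procedure is done pid=200" lines are the master answering the client's polling; the synchronous snapshot call shown earlier polls internally. A caller can also drive the polling itself with the asynchronous variant; this sketch assumes the SnapshotDescription(name, table, type) constructor and the isSnapshotFinished helper are available in this client version:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class AsyncSnapshotSketch {
  // Fire the snapshot request, then poll until the master reports completion.
  public static void snapshotAndWait(Admin admin) throws Exception {
    SnapshotDescription desc = new SnapshotDescription(
        "emptySnaptb0-testEmptyExportFileSystemState",
        TableName.valueOf("testtb-testEmptyExportFileSystemState"),
        SnapshotType.FLUSH);
    admin.snapshotAsync(desc);
    while (!admin.isSnapshotFinished(desc)) {
      Thread.sleep(200); // mirrors the "is procedure done" polling in the log
    }
  }
}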
2024-12-03T23:30:32,367 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,,1733268631538.7d2ccee371d7ad2ad1f5e8638f243bdc.' region-info for snapshot=emptySnaptb0-testEmptyExportFileSystemState 2024-12-03T23:30:32,367 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-03T23:30:32,367 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-03T23:30:32,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742275_1451 (size=76) 2024-12-03T23:30:32,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742275_1451 (size=76) 2024-12-03T23:30:32,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742275_1451 (size=76) 2024-12-03T23:30:32,372 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,1,1733268631538.2aec2ee7a6572023bfa117fbcbc896c8. 2024-12-03T23:30:32,372 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=202 2024-12-03T23:30:32,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.HMaster(4169): Remote procedure done, pid=202 2024-12-03T23:30:32,372 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testEmptyExportFileSystemState on region 2aec2ee7a6572023bfa117fbcbc896c8 2024-12-03T23:30:32,372 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=202, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2aec2ee7a6572023bfa117fbcbc896c8 2024-12-03T23:30:32,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742276_1452 (size=76) 2024-12-03T23:30:32,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742276_1452 (size=76) 2024-12-03T23:30:32,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742276_1452 (size=76) 2024-12-03T23:30:32,374 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,,1733268631538.7d2ccee371d7ad2ad1f5e8638f243bdc. 
2024-12-03T23:30:32,375 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=201 2024-12-03T23:30:32,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.HMaster(4169): Remote procedure done, pid=201 2024-12-03T23:30:32,375 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testEmptyExportFileSystemState on region 7d2ccee371d7ad2ad1f5e8638f243bdc 2024-12-03T23:30:32,375 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=201, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7d2ccee371d7ad2ad1f5e8638f243bdc 2024-12-03T23:30:32,375 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=202, ppid=200, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 2aec2ee7a6572023bfa117fbcbc896c8 in 164 msec 2024-12-03T23:30:32,377 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=201, resume processing ppid=200 2024-12-03T23:30:32,377 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-03T23:30:32,377 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=201, ppid=200, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 7d2ccee371d7ad2ad1f5e8638f243bdc in 167 msec 2024-12-03T23:30:32,378 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-03T23:30:32,379 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-03T23:30:32,379 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-03T23:30:32,379 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:30:32,379 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-03T23:30:32,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742277_1453 (size=68) 2024-12-03T23:30:32,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742277_1453 (size=68) 2024-12-03T23:30:32,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742277_1453 (size=68) 2024-12-03T23:30:32,386 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-03T23:30:32,386 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testEmptyExportFileSystemState 2024-12-03T23:30:32,387 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-12-03T23:30:32,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742278_1454 (size=673) 2024-12-03T23:30:32,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742278_1454 (size=673) 2024-12-03T23:30:32,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742278_1454 (size=673) 2024-12-03T23:30:32,404 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-03T23:30:32,408 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-03T23:30:32,408 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-12-03T23:30:32,410 INFO [PEWorker-2 {}] 
procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-03T23:30:32,410 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 200 2024-12-03T23:30:32,411 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=200, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } in 211 msec 2024-12-03T23:30:32,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=200 2024-12-03T23:30:32,519 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-12-03T23:30:32,525 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37351 {}] regionserver.HRegion(8528): writing data to region testtb-testEmptyExportFileSystemState,,1733268631538.7d2ccee371d7ad2ad1f5e8638f243bdc. with WAL disabled. Data may be lost in the event of a crash. 2024-12-03T23:30:32,527 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39085 {}] regionserver.HRegion(8528): writing data to region testtb-testEmptyExportFileSystemState,1,1733268631538.2aec2ee7a6572023bfa117fbcbc896c8. with WAL disabled. Data may be lost in the event of a crash. 2024-12-03T23:30:32,527 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-03T23:30:32,529 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testEmptyExportFileSystemState 2024-12-03T23:30:32,529 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testEmptyExportFileSystemState,,1733268631538.7d2ccee371d7ad2ad1f5e8638f243bdc. 
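Right after the empty snapshot completes, the test writes rows into both regions with the WAL disabled, hence the "Data may be lost in the event of a crash" warnings above. A sketch of a put issued with the WAL skipped; the row, qualifier and value literals are placeholders:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPutSketch {
  public static void main(String[] args) throws Exception {
    TableName tn = TableName.valueOf("testtb-testEmptyExportFileSystemState");
    try (Connection conn = ConnectionFactory.createConnection();
         Table table = conn.getTable(tn)) {
      Put put = new Put(Bytes.toBytes("row-0"));
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      // SKIP_WAL triggers the "writing data ... with WAL disabled" warning:
      // faster, but the edit is lost if the region server crashes before a flush.
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
    }
  }
}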
2024-12-03T23:30:32,530 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-03T23:30:32,531 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-03T23:30:32,535 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-03T23:30:32,539 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-03T23:30:32,541 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-03T23:30:32,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733268632541 (current time:1733268632541). 2024-12-03T23:30:32,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-03T23:30:32,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testEmptyExportFileSystemState VERSION not specified, setting to 2 2024-12-03T23:30:32,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-03T23:30:32,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@39ad5593, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:30:32,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.ClusterIdFetcher(90): Going to request ed8014902436,43753,-1 for getting cluster id 2024-12-03T23:30:32,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T23:30:32,543 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '365bf15c-9cbe-4838-a767-ac7274dcdbe4' 2024-12-03T23:30:32,543 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T23:30:32,543 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "365bf15c-9cbe-4838-a767-ac7274dcdbe4" 2024-12-03T23:30:32,543 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6ba1fbb1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:30:32,543 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [ed8014902436,43753,-1] 2024-12-03T23:30:32,543 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T23:30:32,543 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:30:32,544 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57694, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T23:30:32,544 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1f577e62, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:30:32,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T23:30:32,545 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ed8014902436,37351,1733268370600, seqNum=-1] 2024-12-03T23:30:32,546 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T23:30:32,546 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47088, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T23:30:32,547 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753. 
2024-12-03T23:30:32,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-03T23:30:32,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:30:32,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:30:32,547 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-03T23:30:32,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6852ed19, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:30:32,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.ClusterIdFetcher(90): Going to request ed8014902436,43753,-1 for getting cluster id 2024-12-03T23:30:32,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T23:30:32,548 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '365bf15c-9cbe-4838-a767-ac7274dcdbe4' 2024-12-03T23:30:32,549 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T23:30:32,549 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "365bf15c-9cbe-4838-a767-ac7274dcdbe4" 2024-12-03T23:30:32,549 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@32fabe09, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:30:32,549 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [ed8014902436,43753,-1] 2024-12-03T23:30:32,549 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T23:30:32,549 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:30:32,550 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57700, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T23:30:32,550 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7f5c873a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:30:32,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T23:30:32,551 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ed8014902436,37351,1733268370600, seqNum=-1] 2024-12-03T23:30:32,551 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T23:30:32,552 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47098, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T23:30:32,553 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testEmptyExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1733268373530.94ece278d222af9bae70064026dc24ae., hostname=ed8014902436,37351,1733268370600, seqNum=2] 2024-12-03T23:30:32,554 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753. 
2024-12-03T23:30:32,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor256.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-03T23:30:32,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:30:32,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:30:32,555 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-03T23:30:32,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] access.PermissionStorage(613): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-12-03T23:30:32,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-12-03T23:30:32,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure2.ProcedureExecutor(1139): Stored pid=203, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-03T23:30:32,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 203 2024-12-03T23:30:32,557 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-03T23:30:32,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=203 2024-12-03T23:30:32,557 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-03T23:30:32,559 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-03T23:30:32,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742279_1455 (size=180) 2024-12-03T23:30:32,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742279_1455 (size=180) 2024-12-03T23:30:32,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742279_1455 (size=180) 2024-12-03T23:30:32,569 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-03T23:30:32,569 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=204, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7d2ccee371d7ad2ad1f5e8638f243bdc}, {pid=205, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2aec2ee7a6572023bfa117fbcbc896c8}] 2024-12-03T23:30:32,570 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=204, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7d2ccee371d7ad2ad1f5e8638f243bdc 2024-12-03T23:30:32,570 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=205, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 
2aec2ee7a6572023bfa117fbcbc896c8 2024-12-03T23:30:32,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=203 2024-12-03T23:30:32,722 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39085 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=205 2024-12-03T23:30:32,722 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37351 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=204 2024-12-03T23:30:32,722 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,,1733268631538.7d2ccee371d7ad2ad1f5e8638f243bdc. 2024-12-03T23:30:32,722 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,1,1733268631538.2aec2ee7a6572023bfa117fbcbc896c8. 2024-12-03T23:30:32,722 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegion(2902): Flushing 7d2ccee371d7ad2ad1f5e8638f243bdc 1/1 column families, dataSize=266 B heapSize=832 B 2024-12-03T23:30:32,722 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegion(2902): Flushing 2aec2ee7a6572023bfa117fbcbc896c8 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-12-03T23:30:32,752 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241203c6a19c75b37e47d7ab4d03f233dfc109_2aec2ee7a6572023bfa117fbcbc896c8 is 71, key is 1214de9528b24c9e4ede29b1da7eda1c/cf:q/1733268632526/Put/seqid=0 2024-12-03T23:30:32,758 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412036bc320c8598146218782ed6a6ce1c7c7_7d2ccee371d7ad2ad1f5e8638f243bdc is 71, key is 00faa75b6387aabde43bb7cd98d136e3/cf:q/1733268632525/Put/seqid=0 2024-12-03T23:30:32,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742280_1456 (size=8102) 2024-12-03T23:30:32,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742280_1456 (size=8102) 2024-12-03T23:30:32,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742280_1456 (size=8102) 2024-12-03T23:30:32,760 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:30:32,764 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742281_1457 (size=5171) 2024-12-03T23:30:32,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742281_1457 (size=5171) 2024-12-03T23:30:32,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742281_1457 (size=5171) 2024-12-03T23:30:32,765 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241203c6a19c75b37e47d7ab4d03f233dfc109_2aec2ee7a6572023bfa117fbcbc896c8 to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/c4ca4238a0b923820dcc509a6f75849b20241203c6a19c75b37e47d7ab4d03f233dfc109_2aec2ee7a6572023bfa117fbcbc896c8 2024-12-03T23:30:32,765 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:30:32,765 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testEmptyExportFileSystemState/2aec2ee7a6572023bfa117fbcbc896c8/.tmp/cf/8cc46b9c2c0747e8963ade57af02efea, store: [table=testtb-testEmptyExportFileSystemState family=cf region=2aec2ee7a6572023bfa117fbcbc896c8] 2024-12-03T23:30:32,766 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testEmptyExportFileSystemState/2aec2ee7a6572023bfa117fbcbc896c8/.tmp/cf/8cc46b9c2c0747e8963ade57af02efea is 214, key is 10e997695fd8f4c9d076f005c34a4923d/cf:q/1733268632526/Put/seqid=0 2024-12-03T23:30:32,769 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412036bc320c8598146218782ed6a6ce1c7c7_7d2ccee371d7ad2ad1f5e8638f243bdc to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/d41d8cd98f00b204e9800998ecf8427e202412036bc320c8598146218782ed6a6ce1c7c7_7d2ccee371d7ad2ad1f5e8638f243bdc 2024-12-03T23:30:32,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742282_1458 (size=15029) 2024-12-03T23:30:32,770 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testEmptyExportFileSystemState/7d2ccee371d7ad2ad1f5e8638f243bdc/.tmp/cf/617549b220d841a6b665bd5dd17f0eae, store: [table=testtb-testEmptyExportFileSystemState family=cf region=7d2ccee371d7ad2ad1f5e8638f243bdc] 2024-12-03T23:30:32,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742282_1458 (size=15029) 2024-12-03T23:30:32,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742282_1458 (size=15029) 2024-12-03T23:30:32,770 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testEmptyExportFileSystemState/7d2ccee371d7ad2ad1f5e8638f243bdc/.tmp/cf/617549b220d841a6b665bd5dd17f0eae is 214, key is 04a7eab190d83c5ff4090ea06a988b7db/cf:q/1733268632525/Put/seqid=0 2024-12-03T23:30:32,771 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testEmptyExportFileSystemState/2aec2ee7a6572023bfa117fbcbc896c8/.tmp/cf/8cc46b9c2c0747e8963ade57af02efea 2024-12-03T23:30:32,776 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testEmptyExportFileSystemState/2aec2ee7a6572023bfa117fbcbc896c8/.tmp/cf/8cc46b9c2c0747e8963ade57af02efea as hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testEmptyExportFileSystemState/2aec2ee7a6572023bfa117fbcbc896c8/cf/8cc46b9c2c0747e8963ade57af02efea 2024-12-03T23:30:32,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742283_1459 (size=6146) 2024-12-03T23:30:32,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742283_1459 (size=6146) 2024-12-03T23:30:32,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742283_1459 (size=6146) 2024-12-03T23:30:32,777 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=266, hasBloomFilter=true, into tmp file hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testEmptyExportFileSystemState/7d2ccee371d7ad2ad1f5e8638f243bdc/.tmp/cf/617549b220d841a6b665bd5dd17f0eae 2024-12-03T23:30:32,781 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testEmptyExportFileSystemState/2aec2ee7a6572023bfa117fbcbc896c8/cf/8cc46b9c2c0747e8963ade57af02efea, entries=46, 
sequenceid=6, filesize=14.7 K 2024-12-03T23:30:32,781 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testEmptyExportFileSystemState/7d2ccee371d7ad2ad1f5e8638f243bdc/.tmp/cf/617549b220d841a6b665bd5dd17f0eae as hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testEmptyExportFileSystemState/7d2ccee371d7ad2ad1f5e8638f243bdc/cf/617549b220d841a6b665bd5dd17f0eae 2024-12-03T23:30:32,782 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegion(3140): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for 2aec2ee7a6572023bfa117fbcbc896c8 in 60ms, sequenceid=6, compaction requested=false 2024-12-03T23:30:32,782 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testEmptyExportFileSystemState' 2024-12-03T23:30:32,783 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegion(2603): Flush status journal for 2aec2ee7a6572023bfa117fbcbc896c8: 2024-12-03T23:30:32,783 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,1,1733268631538.2aec2ee7a6572023bfa117fbcbc896c8. for snaptb0-testEmptyExportFileSystemState completed. 2024-12-03T23:30:32,783 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,1,1733268631538.2aec2ee7a6572023bfa117fbcbc896c8.' 
region-info for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-03T23:30:32,783 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-03T23:30:32,783 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testEmptyExportFileSystemState/2aec2ee7a6572023bfa117fbcbc896c8/cf/8cc46b9c2c0747e8963ade57af02efea] hfiles 2024-12-03T23:30:32,783 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testEmptyExportFileSystemState/2aec2ee7a6572023bfa117fbcbc896c8/cf/8cc46b9c2c0747e8963ade57af02efea for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-03T23:30:32,785 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testEmptyExportFileSystemState/7d2ccee371d7ad2ad1f5e8638f243bdc/cf/617549b220d841a6b665bd5dd17f0eae, entries=4, sequenceid=6, filesize=6.0 K 2024-12-03T23:30:32,786 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegion(3140): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for 7d2ccee371d7ad2ad1f5e8638f243bdc in 64ms, sequenceid=6, compaction requested=false 2024-12-03T23:30:32,786 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegion(2603): Flush status journal for 7d2ccee371d7ad2ad1f5e8638f243bdc: 2024-12-03T23:30:32,786 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,,1733268631538.7d2ccee371d7ad2ad1f5e8638f243bdc. for snaptb0-testEmptyExportFileSystemState completed. 2024-12-03T23:30:32,787 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,,1733268631538.7d2ccee371d7ad2ad1f5e8638f243bdc.' 
region-info for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-03T23:30:32,787 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-03T23:30:32,787 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testEmptyExportFileSystemState/7d2ccee371d7ad2ad1f5e8638f243bdc/cf/617549b220d841a6b665bd5dd17f0eae] hfiles 2024-12-03T23:30:32,787 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testEmptyExportFileSystemState/7d2ccee371d7ad2ad1f5e8638f243bdc/cf/617549b220d841a6b665bd5dd17f0eae for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-03T23:30:32,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742284_1460 (size=115) 2024-12-03T23:30:32,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742284_1460 (size=115) 2024-12-03T23:30:32,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742284_1460 (size=115) 2024-12-03T23:30:32,801 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,1,1733268631538.2aec2ee7a6572023bfa117fbcbc896c8. 
2024-12-03T23:30:32,801 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=205 2024-12-03T23:30:32,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.HMaster(4169): Remote procedure done, pid=205 2024-12-03T23:30:32,801 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testEmptyExportFileSystemState on region 2aec2ee7a6572023bfa117fbcbc896c8 2024-12-03T23:30:32,802 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=205, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2aec2ee7a6572023bfa117fbcbc896c8 2024-12-03T23:30:32,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742285_1461 (size=115) 2024-12-03T23:30:32,804 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=205, ppid=203, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 2aec2ee7a6572023bfa117fbcbc896c8 in 234 msec 2024-12-03T23:30:32,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742285_1461 (size=115) 2024-12-03T23:30:32,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742285_1461 (size=115) 2024-12-03T23:30:32,805 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,,1733268631538.7d2ccee371d7ad2ad1f5e8638f243bdc. 
2024-12-03T23:30:32,805 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=204 2024-12-03T23:30:32,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.HMaster(4169): Remote procedure done, pid=204 2024-12-03T23:30:32,806 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testEmptyExportFileSystemState on region 7d2ccee371d7ad2ad1f5e8638f243bdc 2024-12-03T23:30:32,806 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=204, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7d2ccee371d7ad2ad1f5e8638f243bdc 2024-12-03T23:30:32,808 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=204, resume processing ppid=203 2024-12-03T23:30:32,808 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=204, ppid=203, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 7d2ccee371d7ad2ad1f5e8638f243bdc in 237 msec 2024-12-03T23:30:32,808 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-03T23:30:32,809 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-03T23:30:32,810 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-03T23:30:32,810 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-03T23:30:32,810 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:30:32,811 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/c4ca4238a0b923820dcc509a6f75849b20241203c6a19c75b37e47d7ab4d03f233dfc109_2aec2ee7a6572023bfa117fbcbc896c8, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/d41d8cd98f00b204e9800998ecf8427e202412036bc320c8598146218782ed6a6ce1c7c7_7d2ccee371d7ad2ad1f5e8638f243bdc] hfiles 2024-12-03T23:30:32,811 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/c4ca4238a0b923820dcc509a6f75849b20241203c6a19c75b37e47d7ab4d03f233dfc109_2aec2ee7a6572023bfa117fbcbc896c8 2024-12-03T23:30:32,811 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/d41d8cd98f00b204e9800998ecf8427e202412036bc320c8598146218782ed6a6ce1c7c7_7d2ccee371d7ad2ad1f5e8638f243bdc 2024-12-03T23:30:32,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742286_1462 (size=299) 2024-12-03T23:30:32,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742286_1462 (size=299) 2024-12-03T23:30:32,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742286_1462 (size=299) 2024-12-03T23:30:32,818 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-03T23:30:32,818 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testEmptyExportFileSystemState 2024-12-03T23:30:32,819 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/.tmp/snaptb0-testEmptyExportFileSystemState 2024-12-03T23:30:32,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742287_1463 (size=983) 2024-12-03T23:30:32,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742287_1463 (size=983) 
2024-12-03T23:30:32,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742287_1463 (size=983) 2024-12-03T23:30:32,844 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-03T23:30:32,851 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-03T23:30:32,851 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/.tmp/snaptb0-testEmptyExportFileSystemState to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/snaptb0-testEmptyExportFileSystemState 2024-12-03T23:30:32,853 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-03T23:30:32,853 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 203 2024-12-03T23:30:32,855 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=203, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } in 298 msec 2024-12-03T23:30:32,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=203 2024-12-03T23:30:32,870 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-12-03T23:30:32,870 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268632870 2024-12-03T23:30:32,870 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:37331, tgtDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268632870, rawTgtDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268632870, srcFsUri=hdfs://localhost:37331, srcDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b 2024-12-03T23:30:32,908 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:37331, 
inputRoot=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b 2024-12-03T23:30:32,908 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-919726820_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268632870, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268632870/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-12-03T23:30:32,910 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-03T23:30:32,914 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268632870/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-12-03T23:30:32,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742288_1464 (size=673) 2024-12-03T23:30:32,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742288_1464 (size=673) 2024-12-03T23:30:32,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742288_1464 (size=673) 2024-12-03T23:30:32,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742289_1465 (size=185) 2024-12-03T23:30:32,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742289_1465 (size=185) 2024-12-03T23:30:32,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742289_1465 (size=185) 2024-12-03T23:30:32,924 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:30:32,925 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:30:32,925 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:30:33,772 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/hadoop-8186871928115775353.jar 2024-12-03T23:30:33,772 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:30:33,772 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:30:33,826 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/hadoop-17584897996611294101.jar 2024-12-03T23:30:33,826 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:30:33,826 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:30:33,826 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:30:33,827 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:30:33,827 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:30:33,827 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:30:33,827 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-03T23:30:33,827 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-03T23:30:33,827 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-03T23:30:33,828 DEBUG [Time-limited 
test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-03T23:30:33,828 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-03T23:30:33,828 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-03T23:30:33,828 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-03T23:30:33,828 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-03T23:30:33,828 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-03T23:30:33,829 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-03T23:30:33,829 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-03T23:30:33,829 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-03T23:30:33,829 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-03T23:30:33,829 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-03T23:30:33,829 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 
2024-12-03T23:30:33,830 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-03T23:30:33,830 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-03T23:30:33,830 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-03T23:30:34,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742290_1466 (size=131440) 2024-12-03T23:30:34,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742290_1466 (size=131440) 2024-12-03T23:30:34,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742290_1466 (size=131440) 2024-12-03T23:30:34,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742291_1467 (size=4188619) 2024-12-03T23:30:34,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742291_1467 (size=4188619) 2024-12-03T23:30:34,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742291_1467 (size=4188619) 2024-12-03T23:30:34,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742292_1468 (size=1323991) 2024-12-03T23:30:34,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742292_1468 (size=1323991) 2024-12-03T23:30:34,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742292_1468 (size=1323991) 2024-12-03T23:30:34,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742293_1469 (size=903924) 2024-12-03T23:30:34,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742293_1469 (size=903924) 2024-12-03T23:30:34,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742293_1469 (size=903924) 2024-12-03T23:30:34,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742294_1470 (size=8360360) 2024-12-03T23:30:34,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742294_1470 (size=8360360) 2024-12-03T23:30:34,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:39281 is added to blk_1073742294_1470 (size=8360360) 2024-12-03T23:30:34,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742295_1471 (size=443172) 2024-12-03T23:30:34,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742295_1471 (size=443172) 2024-12-03T23:30:34,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742295_1471 (size=443172) 2024-12-03T23:30:34,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742296_1472 (size=1877034) 2024-12-03T23:30:34,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742296_1472 (size=1877034) 2024-12-03T23:30:34,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742296_1472 (size=1877034) 2024-12-03T23:30:34,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742297_1473 (size=77835) 2024-12-03T23:30:34,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742297_1473 (size=77835) 2024-12-03T23:30:34,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742297_1473 (size=77835) 2024-12-03T23:30:34,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742298_1474 (size=30949) 2024-12-03T23:30:34,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742298_1474 (size=30949) 2024-12-03T23:30:34,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742298_1474 (size=30949) 2024-12-03T23:30:34,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742299_1475 (size=1597213) 2024-12-03T23:30:34,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742299_1475 (size=1597213) 2024-12-03T23:30:34,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742299_1475 (size=1597213) 2024-12-03T23:30:34,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742300_1476 (size=4695811) 2024-12-03T23:30:34,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742300_1476 (size=4695811) 2024-12-03T23:30:34,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742300_1476 (size=4695811) 2024-12-03T23:30:34,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742301_1477 (size=232957) 2024-12-03T23:30:34,919 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742301_1477 (size=232957) 2024-12-03T23:30:34,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742301_1477 (size=232957) 2024-12-03T23:30:34,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742302_1478 (size=127628) 2024-12-03T23:30:34,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742302_1478 (size=127628) 2024-12-03T23:30:34,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742302_1478 (size=127628) 2024-12-03T23:30:34,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742303_1479 (size=6424741) 2024-12-03T23:30:34,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742303_1479 (size=6424741) 2024-12-03T23:30:34,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742303_1479 (size=6424741) 2024-12-03T23:30:34,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742304_1480 (size=20406) 2024-12-03T23:30:34,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742304_1480 (size=20406) 2024-12-03T23:30:34,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742304_1480 (size=20406) 2024-12-03T23:30:35,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742305_1481 (size=5175431) 2024-12-03T23:30:35,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742305_1481 (size=5175431) 2024-12-03T23:30:35,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742305_1481 (size=5175431) 2024-12-03T23:30:35,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742306_1482 (size=217634) 2024-12-03T23:30:35,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742306_1482 (size=217634) 2024-12-03T23:30:35,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742306_1482 (size=217634) 2024-12-03T23:30:35,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742307_1483 (size=1832290) 2024-12-03T23:30:35,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742307_1483 (size=1832290) 2024-12-03T23:30:35,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742307_1483 (size=1832290) 2024-12-03T23:30:35,067 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742308_1484 (size=322274) 2024-12-03T23:30:35,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742308_1484 (size=322274) 2024-12-03T23:30:35,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742308_1484 (size=322274) 2024-12-03T23:30:35,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742309_1485 (size=503880) 2024-12-03T23:30:35,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742309_1485 (size=503880) 2024-12-03T23:30:35,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742309_1485 (size=503880) 2024-12-03T23:30:35,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742310_1486 (size=29229) 2024-12-03T23:30:35,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742310_1486 (size=29229) 2024-12-03T23:30:35,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742310_1486 (size=29229) 2024-12-03T23:30:35,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742311_1487 (size=24096) 2024-12-03T23:30:35,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742311_1487 (size=24096) 2024-12-03T23:30:35,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742311_1487 (size=24096) 2024-12-03T23:30:35,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742312_1488 (size=111872) 2024-12-03T23:30:35,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742312_1488 (size=111872) 2024-12-03T23:30:35,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742312_1488 (size=111872) 2024-12-03T23:30:35,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742313_1489 (size=45609) 2024-12-03T23:30:35,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742313_1489 (size=45609) 2024-12-03T23:30:35,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742313_1489 (size=45609) 2024-12-03T23:30:35,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742314_1490 (size=136454) 2024-12-03T23:30:35,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742314_1490 (size=136454) 2024-12-03T23:30:35,524 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742314_1490 (size=136454) 2024-12-03T23:30:35,525 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-03T23:30:35,527 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'emptySnaptb0-testEmptyExportFileSystemState' hfile list 2024-12-03T23:30:35,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742315_1491 (size=7) 2024-12-03T23:30:35,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742315_1491 (size=7) 2024-12-03T23:30:35,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742315_1491 (size=7) 2024-12-03T23:30:35,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742316_1492 (size=10) 2024-12-03T23:30:35,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742316_1492 (size=10) 2024-12-03T23:30:35,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742316_1492 (size=10) 2024-12-03T23:30:35,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742317_1493 (size=303901) 2024-12-03T23:30:35,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742317_1493 (size=303901) 2024-12-03T23:30:35,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742317_1493 (size=303901) 2024-12-03T23:30:35,582 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-03T23:30:35,582 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-03T23:30:35,716 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0008_000001 (auth:SIMPLE) from 127.0.0.1:52676 2024-12-03T23:30:36,678 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-03T23:30:38,449 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-03T23:30:39,917 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState 2024-12-03T23:30:39,918 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState Metrics about Tables on a single HBase RegionServer 2024-12-03T23:30:39,918 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot 2024-12-03T23:30:40,995 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0008_000001 (auth:SIMPLE) from 127.0.0.1:50250 2024-12-03T23:30:41,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742318_1494 (size=349575) 2024-12-03T23:30:41,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742318_1494 (size=349575) 2024-12-03T23:30:41,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742318_1494 (size=349575) 2024-12-03T23:30:42,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742319_1495 (size=8568) 2024-12-03T23:30:42,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742319_1495 (size=8568) 2024-12-03T23:30:42,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742319_1495 (size=8568) 2024-12-03T23:30:42,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742320_1496 (size=460) 2024-12-03T23:30:42,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742320_1496 (size=460) 2024-12-03T23:30:42,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742320_1496 (size=460) 2024-12-03T23:30:42,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742321_1497 (size=8568) 2024-12-03T23:30:42,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742321_1497 (size=8568) 2024-12-03T23:30:42,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742321_1497 (size=8568) 2024-12-03T23:30:42,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742322_1498 (size=349575) 2024-12-03T23:30:42,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742322_1498 (size=349575) 2024-12-03T23:30:42,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742322_1498 (size=349575) 
2024-12-03T23:30:43,891 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-03T23:30:43,892 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-12-03T23:30:43,896 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: emptySnaptb0-testEmptyExportFileSystemState 2024-12-03T23:30:43,896 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-03T23:30:43,896 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-03T23:30:43,896 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-919726820_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState at hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-12-03T23:30:43,897 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/.snapshotinfo 2024-12-03T23:30:43,897 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/data.manifest 2024-12-03T23:30:43,897 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-919726820_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268632870/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState at hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268632870/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-12-03T23:30:43,897 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268632870/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/.snapshotinfo 2024-12-03T23:30:43,897 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268632870/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/data.manifest 2024-12-03T23:30:43,902 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testEmptyExportFileSystemState 2024-12-03T23:30:43,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure2.ProcedureExecutor(1139): Stored pid=206, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-03T23:30:43,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=206 2024-12-03T23:30:43,905 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put 
{"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733268643905"}]},"ts":"1733268643905"} 2024-12-03T23:30:43,906 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=DISABLING in hbase:meta 2024-12-03T23:30:43,906 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testtb-testEmptyExportFileSystemState to state=DISABLING 2024-12-03T23:30:43,907 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=207, ppid=206, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testEmptyExportFileSystemState}] 2024-12-03T23:30:43,908 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=208, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=7d2ccee371d7ad2ad1f5e8638f243bdc, UNASSIGN}, {pid=209, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=2aec2ee7a6572023bfa117fbcbc896c8, UNASSIGN}] 2024-12-03T23:30:43,909 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=208, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=7d2ccee371d7ad2ad1f5e8638f243bdc, UNASSIGN 2024-12-03T23:30:43,909 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=209, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=2aec2ee7a6572023bfa117fbcbc896c8, UNASSIGN 2024-12-03T23:30:43,909 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=208 updating hbase:meta row=7d2ccee371d7ad2ad1f5e8638f243bdc, regionState=CLOSING, regionLocation=ed8014902436,37351,1733268370600 2024-12-03T23:30:43,909 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=209 updating hbase:meta row=2aec2ee7a6572023bfa117fbcbc896c8, regionState=CLOSING, regionLocation=ed8014902436,39085,1733268370401 2024-12-03T23:30:43,911 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=208, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=7d2ccee371d7ad2ad1f5e8638f243bdc, UNASSIGN because future has completed 2024-12-03T23:30:43,911 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-03T23:30:43,911 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=210, ppid=208, state=RUNNABLE, hasLock=false; CloseRegionProcedure 7d2ccee371d7ad2ad1f5e8638f243bdc, server=ed8014902436,37351,1733268370600}] 2024-12-03T23:30:43,911 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=209, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=2aec2ee7a6572023bfa117fbcbc896c8, UNASSIGN because future has completed 2024-12-03T23:30:43,912 DEBUG [PEWorker-4 {}] 
assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-03T23:30:43,912 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=211, ppid=209, state=RUNNABLE, hasLock=false; CloseRegionProcedure 2aec2ee7a6572023bfa117fbcbc896c8, server=ed8014902436,39085,1733268370401}] 2024-12-03T23:30:44,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=206 2024-12-03T23:30:44,063 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] handler.UnassignRegionHandler(122): Close 7d2ccee371d7ad2ad1f5e8638f243bdc 2024-12-03T23:30:44,063 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-03T23:30:44,063 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1722): Closing 7d2ccee371d7ad2ad1f5e8638f243bdc, disabling compactions & flushes 2024-12-03T23:30:44,063 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,,1733268631538.7d2ccee371d7ad2ad1f5e8638f243bdc. 2024-12-03T23:30:44,063 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,,1733268631538.7d2ccee371d7ad2ad1f5e8638f243bdc. 2024-12-03T23:30:44,063 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,,1733268631538.7d2ccee371d7ad2ad1f5e8638f243bdc. after waiting 0 ms 2024-12-03T23:30:44,063 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,,1733268631538.7d2ccee371d7ad2ad1f5e8638f243bdc. 2024-12-03T23:30:44,064 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] handler.UnassignRegionHandler(122): Close 2aec2ee7a6572023bfa117fbcbc896c8 2024-12-03T23:30:44,064 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-03T23:30:44,064 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1722): Closing 2aec2ee7a6572023bfa117fbcbc896c8, disabling compactions & flushes 2024-12-03T23:30:44,064 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,1,1733268631538.2aec2ee7a6572023bfa117fbcbc896c8. 2024-12-03T23:30:44,064 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,1,1733268631538.2aec2ee7a6572023bfa117fbcbc896c8. 
2024-12-03T23:30:44,064 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,1,1733268631538.2aec2ee7a6572023bfa117fbcbc896c8. after waiting 0 ms 2024-12-03T23:30:44,064 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,1,1733268631538.2aec2ee7a6572023bfa117fbcbc896c8. 2024-12-03T23:30:44,066 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testEmptyExportFileSystemState/7d2ccee371d7ad2ad1f5e8638f243bdc/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-03T23:30:44,067 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testEmptyExportFileSystemState/2aec2ee7a6572023bfa117fbcbc896c8/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-03T23:30:44,067 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-03T23:30:44,067 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,,1733268631538.7d2ccee371d7ad2ad1f5e8638f243bdc. 2024-12-03T23:30:44,067 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1676): Region close journal for 7d2ccee371d7ad2ad1f5e8638f243bdc: Waiting for close lock at 1733268644063Running coprocessor pre-close hooks at 1733268644063Disabling compacts and flushes for region at 1733268644063Disabling writes for close at 1733268644063Writing region close event to WAL at 1733268644064 (+1 ms)Running coprocessor post-close hooks at 1733268644067 (+3 ms)Closed at 1733268644067 2024-12-03T23:30:44,067 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-03T23:30:44,067 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,1,1733268631538.2aec2ee7a6572023bfa117fbcbc896c8. 
2024-12-03T23:30:44,067 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1676): Region close journal for 2aec2ee7a6572023bfa117fbcbc896c8: Waiting for close lock at 1733268644064Running coprocessor pre-close hooks at 1733268644064Disabling compacts and flushes for region at 1733268644064Disabling writes for close at 1733268644064Writing region close event to WAL at 1733268644065 (+1 ms)Running coprocessor post-close hooks at 1733268644067 (+2 ms)Closed at 1733268644067 2024-12-03T23:30:44,069 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] handler.UnassignRegionHandler(157): Closed 7d2ccee371d7ad2ad1f5e8638f243bdc 2024-12-03T23:30:44,070 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=208 updating hbase:meta row=7d2ccee371d7ad2ad1f5e8638f243bdc, regionState=CLOSED 2024-12-03T23:30:44,070 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] handler.UnassignRegionHandler(157): Closed 2aec2ee7a6572023bfa117fbcbc896c8 2024-12-03T23:30:44,070 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=209 updating hbase:meta row=2aec2ee7a6572023bfa117fbcbc896c8, regionState=CLOSED 2024-12-03T23:30:44,072 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=210, ppid=208, state=RUNNABLE, hasLock=false; CloseRegionProcedure 7d2ccee371d7ad2ad1f5e8638f243bdc, server=ed8014902436,37351,1733268370600 because future has completed 2024-12-03T23:30:44,073 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=211, ppid=209, state=RUNNABLE, hasLock=false; CloseRegionProcedure 2aec2ee7a6572023bfa117fbcbc896c8, server=ed8014902436,39085,1733268370401 because future has completed 2024-12-03T23:30:44,075 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=210, resume processing ppid=208 2024-12-03T23:30:44,075 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=210, ppid=208, state=SUCCESS, hasLock=false; CloseRegionProcedure 7d2ccee371d7ad2ad1f5e8638f243bdc, server=ed8014902436,37351,1733268370600 in 162 msec 2024-12-03T23:30:44,076 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=211, resume processing ppid=209 2024-12-03T23:30:44,076 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=211, ppid=209, state=SUCCESS, hasLock=false; CloseRegionProcedure 2aec2ee7a6572023bfa117fbcbc896c8, server=ed8014902436,39085,1733268370401 in 161 msec 2024-12-03T23:30:44,077 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=208, ppid=207, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=7d2ccee371d7ad2ad1f5e8638f243bdc, UNASSIGN in 167 msec 2024-12-03T23:30:44,078 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=209, resume processing ppid=207 2024-12-03T23:30:44,078 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=209, ppid=207, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=2aec2ee7a6572023bfa117fbcbc896c8, UNASSIGN in 168 msec 2024-12-03T23:30:44,080 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=207, resume processing ppid=206 2024-12-03T23:30:44,080 INFO [PEWorker-3 {}] 
procedure2.ProcedureExecutor(1521): Finished pid=207, ppid=206, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testEmptyExportFileSystemState in 172 msec 2024-12-03T23:30:44,081 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733268644081"}]},"ts":"1733268644081"} 2024-12-03T23:30:44,082 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=DISABLED in hbase:meta 2024-12-03T23:30:44,082 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testEmptyExportFileSystemState to state=DISABLED 2024-12-03T23:30:44,084 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=206, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testEmptyExportFileSystemState in 181 msec 2024-12-03T23:30:44,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=206 2024-12-03T23:30:44,221 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-12-03T23:30:44,222 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testEmptyExportFileSystemState 2024-12-03T23:30:44,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure2.ProcedureExecutor(1139): Stored pid=212, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-03T23:30:44,224 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=212, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-03T23:30:44,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testEmptyExportFileSystemState 2024-12-03T23:30:44,225 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=212, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-03T23:30:44,227 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37351 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testEmptyExportFileSystemState 2024-12-03T23:30:44,229 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testEmptyExportFileSystemState/2aec2ee7a6572023bfa117fbcbc896c8 2024-12-03T23:30:44,229 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testEmptyExportFileSystemState/7d2ccee371d7ad2ad1f5e8638f243bdc 2024-12-03T23:30:44,230 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testEmptyExportFileSystemState/7d2ccee371d7ad2ad1f5e8638f243bdc/cf, FileablePath, 
hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testEmptyExportFileSystemState/7d2ccee371d7ad2ad1f5e8638f243bdc/recovered.edits] 2024-12-03T23:30:44,230 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testEmptyExportFileSystemState/2aec2ee7a6572023bfa117fbcbc896c8/cf, FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testEmptyExportFileSystemState/2aec2ee7a6572023bfa117fbcbc896c8/recovered.edits] 2024-12-03T23:30:44,244 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testEmptyExportFileSystemState/7d2ccee371d7ad2ad1f5e8638f243bdc/cf/617549b220d841a6b665bd5dd17f0eae to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testtb-testEmptyExportFileSystemState/7d2ccee371d7ad2ad1f5e8638f243bdc/cf/617549b220d841a6b665bd5dd17f0eae 2024-12-03T23:30:44,244 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testEmptyExportFileSystemState/2aec2ee7a6572023bfa117fbcbc896c8/cf/8cc46b9c2c0747e8963ade57af02efea to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testtb-testEmptyExportFileSystemState/2aec2ee7a6572023bfa117fbcbc896c8/cf/8cc46b9c2c0747e8963ade57af02efea 2024-12-03T23:30:44,246 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testEmptyExportFileSystemState/7d2ccee371d7ad2ad1f5e8638f243bdc/recovered.edits/9.seqid to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testtb-testEmptyExportFileSystemState/7d2ccee371d7ad2ad1f5e8638f243bdc/recovered.edits/9.seqid 2024-12-03T23:30:44,247 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testEmptyExportFileSystemState/2aec2ee7a6572023bfa117fbcbc896c8/recovered.edits/9.seqid to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testtb-testEmptyExportFileSystemState/2aec2ee7a6572023bfa117fbcbc896c8/recovered.edits/9.seqid 2024-12-03T23:30:44,247 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testEmptyExportFileSystemState/7d2ccee371d7ad2ad1f5e8638f243bdc 2024-12-03T23:30:44,247 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testEmptyExportFileSystemState/2aec2ee7a6572023bfa117fbcbc896c8 2024-12-03T23:30:44,248 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testEmptyExportFileSystemState regions 2024-12-03T23:30:44,248 DEBUG [PEWorker-5 {}] backup.HFileArchiver(131): ARCHIVING 
hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0 2024-12-03T23:30:44,249 DEBUG [PEWorker-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf] 2024-12-03T23:30:44,251 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/c4ca4238a0b923820dcc509a6f75849b20241203c6a19c75b37e47d7ab4d03f233dfc109_2aec2ee7a6572023bfa117fbcbc896c8 to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/c4ca4238a0b923820dcc509a6f75849b20241203c6a19c75b37e47d7ab4d03f233dfc109_2aec2ee7a6572023bfa117fbcbc896c8 2024-12-03T23:30:44,252 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/d41d8cd98f00b204e9800998ecf8427e202412036bc320c8598146218782ed6a6ce1c7c7_7d2ccee371d7ad2ad1f5e8638f243bdc to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/d41d8cd98f00b204e9800998ecf8427e202412036bc320c8598146218782ed6a6ce1c7c7_7d2ccee371d7ad2ad1f5e8638f243bdc 2024-12-03T23:30:44,253 DEBUG [PEWorker-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0 2024-12-03T23:30:44,255 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=212, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-03T23:30:44,258 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testEmptyExportFileSystemState from hbase:meta 2024-12-03T23:30:44,292 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-03T23:30:44,292 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-03T23:30:44,292 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-03T23:30:44,292 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-03T23:30:44,294 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-03T23:30:44,294 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-03T23:30:44,294 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-03T23:30:44,294 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-03T23:30:44,296 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testEmptyExportFileSystemState' descriptor. 2024-12-03T23:30:44,297 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=212, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-03T23:30:44,298 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testEmptyExportFileSystemState' from region states. 2024-12-03T23:30:44,298 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState,,1733268631538.7d2ccee371d7ad2ad1f5e8638f243bdc.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733268644298"}]},"ts":"9223372036854775807"} 2024-12-03T23:30:44,298 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState,1,1733268631538.2aec2ee7a6572023bfa117fbcbc896c8.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733268644298"}]},"ts":"9223372036854775807"} 2024-12-03T23:30:44,300 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-03T23:30:44,300 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-03T23:30:44,300 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-03T23:30:44,300 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:30:44,300 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:30:44,300 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received 
ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:30:44,300 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-03T23:30:44,300 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:30:44,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=212 2024-12-03T23:30:44,301 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-03T23:30:44,301 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 7d2ccee371d7ad2ad1f5e8638f243bdc, NAME => 'testtb-testEmptyExportFileSystemState,,1733268631538.7d2ccee371d7ad2ad1f5e8638f243bdc.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 2aec2ee7a6572023bfa117fbcbc896c8, NAME => 'testtb-testEmptyExportFileSystemState,1,1733268631538.2aec2ee7a6572023bfa117fbcbc896c8.', STARTKEY => '1', ENDKEY => ''}] 2024-12-03T23:30:44,301 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testEmptyExportFileSystemState' as deleted. 2024-12-03T23:30:44,301 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-03T23:30:44,301 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-03T23:30:44,301 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-03T23:30:44,301 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-03T23:30:44,302 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733268644301"}]},"ts":"9223372036854775807"} 2024-12-03T23:30:44,303 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testEmptyExportFileSystemState state from META 2024-12-03T23:30:44,304 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=212, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-03T23:30:44,304 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=212, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState in 82 msec 2024-12-03T23:30:44,409 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=212 2024-12-03T23:30:44,409 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testEmptyExportFileSystemState 2024-12-03T23:30:44,409 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-12-03T23:30:44,414 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testEmptyExportFileSystemState" type: DISABLED 2024-12-03T23:30:44,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testEmptyExportFileSystemState 2024-12-03T23:30:44,417 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testEmptyExportFileSystemState" type: DISABLED 2024-12-03T23:30:44,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testEmptyExportFileSystemState 2024-12-03T23:30:44,443 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testEmptyExportFileSystemState Thread=820 (was 807) Potentially hanging thread: Thread-7441 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: HFileArchiver-23 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-185930833_1 at /127.0.0.1:40140 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-185930833_1 at /127.0.0.1:41120 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (790252428) connection to localhost/127.0.0.1:43607 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: process reaper (pid 152979) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-919726820_22 at /127.0.0.1:41152 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (790252428) connection to localhost/127.0.0.1:35655 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35655 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-919726820_22 at /127.0.0.1:34094 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (790252428) connection to localhost/127.0.0.1:37783 from appattempt_1733268377251_0008_000001 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-919726820_22 at /127.0.0.1:40160 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=817 (was 793) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=611 (was 613), ProcessCount=17 (was 11) - ProcessCount LEAK? 
-, AvailableMemoryMB=2089 (was 2942) 2024-12-03T23:30:44,443 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=820 is superior to 500 2024-12-03T23:30:44,460 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportWithChecksum Thread=820, OpenFileDescriptor=817, MaxFileDescriptor=1048576, SystemLoadAverage=611, ProcessCount=17, AvailableMemoryMB=2088 2024-12-03T23:30:44,460 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=820 is superior to 500 2024-12-03T23:30:44,462 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-03T23:30:44,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure2.ProcedureExecutor(1139): Stored pid=213, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithChecksum 2024-12-03T23:30:44,464 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_PRE_OPERATION 2024-12-03T23:30:44,464 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithChecksum" procId is: 213 2024-12-03T23:30:44,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=213 2024-12-03T23:30:44,465 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-03T23:30:44,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742323_1499 (size=440) 2024-12-03T23:30:44,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742323_1499 (size=440) 2024-12-03T23:30:44,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742323_1499 (size=440) 2024-12-03T23:30:44,478 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 5cb7960ee15b899fc682c05cfcfa76be, NAME => 'testtb-testExportWithChecksum,,1733268644461.5cb7960ee15b899fc682c05cfcfa76be.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', 
BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b 2024-12-03T23:30:44,478 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => a2ce8b5750bad6679883aabcdedd0e0e, NAME => 'testtb-testExportWithChecksum,1,1733268644461.a2ce8b5750bad6679883aabcdedd0e0e.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b 2024-12-03T23:30:44,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742324_1500 (size=65) 2024-12-03T23:30:44,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742324_1500 (size=65) 2024-12-03T23:30:44,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742324_1500 (size=65) 2024-12-03T23:30:44,493 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,1,1733268644461.a2ce8b5750bad6679883aabcdedd0e0e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T23:30:44,493 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1722): Closing a2ce8b5750bad6679883aabcdedd0e0e, disabling compactions & flushes 2024-12-03T23:30:44,493 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,1,1733268644461.a2ce8b5750bad6679883aabcdedd0e0e. 2024-12-03T23:30:44,493 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,1,1733268644461.a2ce8b5750bad6679883aabcdedd0e0e. 2024-12-03T23:30:44,493 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,1,1733268644461.a2ce8b5750bad6679883aabcdedd0e0e. after waiting 0 ms 2024-12-03T23:30:44,493 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,1,1733268644461.a2ce8b5750bad6679883aabcdedd0e0e. 2024-12-03T23:30:44,493 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,1,1733268644461.a2ce8b5750bad6679883aabcdedd0e0e. 
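[editor's note] The table descriptor logged above (MOB-enabled 'cf' family, one version, two regions split at row key '1') corresponds roughly to what the public HBase 2.x client API would build. A minimal, illustrative sketch only — the connection/configuration setup is assumed, and this is not code from the test itself:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateMobTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptorBuilder table = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("testtb-testExportWithChecksum"))
              .setRegionReplication(1)                       // REGION_REPLICATION => '1'
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                  .setMobEnabled(true)                       // IS_MOB => 'true'
                  .setMobThreshold(0L)                       // MOB_THRESHOLD => '0'
                  .setMaxVersions(1)                         // VERSIONS => '1'
                  .build());
          // Pre-split into two regions at row key '1', matching the logged STARTKEY/ENDKEY layout.
          byte[][] splitKeys = new byte[][] { Bytes.toBytes("1") };
          admin.createTable(table.build(), splitKeys);
        }
      }
    }
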
2024-12-03T23:30:44,493 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1676): Region close journal for a2ce8b5750bad6679883aabcdedd0e0e: Waiting for close lock at 1733268644493Disabling compacts and flushes for region at 1733268644493Disabling writes for close at 1733268644493Writing region close event to WAL at 1733268644493Closed at 1733268644493 2024-12-03T23:30:44,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742325_1501 (size=65) 2024-12-03T23:30:44,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742325_1501 (size=65) 2024-12-03T23:30:44,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742325_1501 (size=65) 2024-12-03T23:30:44,498 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,,1733268644461.5cb7960ee15b899fc682c05cfcfa76be.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T23:30:44,498 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1722): Closing 5cb7960ee15b899fc682c05cfcfa76be, disabling compactions & flushes 2024-12-03T23:30:44,498 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,,1733268644461.5cb7960ee15b899fc682c05cfcfa76be. 2024-12-03T23:30:44,498 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,,1733268644461.5cb7960ee15b899fc682c05cfcfa76be. 2024-12-03T23:30:44,498 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,,1733268644461.5cb7960ee15b899fc682c05cfcfa76be. after waiting 0 ms 2024-12-03T23:30:44,498 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,,1733268644461.5cb7960ee15b899fc682c05cfcfa76be. 2024-12-03T23:30:44,498 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,,1733268644461.5cb7960ee15b899fc682c05cfcfa76be. 
2024-12-03T23:30:44,498 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1676): Region close journal for 5cb7960ee15b899fc682c05cfcfa76be: Waiting for close lock at 1733268644498Disabling compacts and flushes for region at 1733268644498Disabling writes for close at 1733268644498Writing region close event to WAL at 1733268644498Closed at 1733268644498 2024-12-03T23:30:44,499 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_ADD_TO_META 2024-12-03T23:30:44,499 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithChecksum,1,1733268644461.a2ce8b5750bad6679883aabcdedd0e0e.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733268644499"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733268644499"}]},"ts":"1733268644499"} 2024-12-03T23:30:44,499 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithChecksum,,1733268644461.5cb7960ee15b899fc682c05cfcfa76be.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733268644499"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733268644499"}]},"ts":"1733268644499"} 2024-12-03T23:30:44,501 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-03T23:30:44,505 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-03T23:30:44,505 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733268644505"}]},"ts":"1733268644505"} 2024-12-03T23:30:44,506 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=ENABLING in hbase:meta 2024-12-03T23:30:44,507 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {ed8014902436=0} racks are {/default-rack=0} 2024-12-03T23:30:44,508 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-03T23:30:44,508 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-03T23:30:44,508 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-03T23:30:44,508 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-03T23:30:44,508 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-03T23:30:44,508 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-03T23:30:44,508 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-03T23:30:44,508 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-03T23:30:44,508 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-03T23:30:44,508 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-03T23:30:44,508 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=214, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=5cb7960ee15b899fc682c05cfcfa76be, ASSIGN}, {pid=215, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=a2ce8b5750bad6679883aabcdedd0e0e, ASSIGN}] 2024-12-03T23:30:44,509 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=214, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=5cb7960ee15b899fc682c05cfcfa76be, ASSIGN 2024-12-03T23:30:44,509 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=215, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=a2ce8b5750bad6679883aabcdedd0e0e, ASSIGN 2024-12-03T23:30:44,510 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=214, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=5cb7960ee15b899fc682c05cfcfa76be, ASSIGN; state=OFFLINE, location=ed8014902436,39085,1733268370401; forceNewPlan=false, retain=false 2024-12-03T23:30:44,510 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=215, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=a2ce8b5750bad6679883aabcdedd0e0e, ASSIGN; state=OFFLINE, location=ed8014902436,41121,1733268370525; forceNewPlan=false, retain=false 2024-12-03T23:30:44,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=213 2024-12-03T23:30:44,661 INFO [ed8014902436:43753 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-03T23:30:44,662 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=214 updating hbase:meta row=5cb7960ee15b899fc682c05cfcfa76be, regionState=OPENING, regionLocation=ed8014902436,39085,1733268370401 2024-12-03T23:30:44,662 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=215 updating hbase:meta row=a2ce8b5750bad6679883aabcdedd0e0e, regionState=OPENING, regionLocation=ed8014902436,41121,1733268370525 2024-12-03T23:30:44,665 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=215, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=a2ce8b5750bad6679883aabcdedd0e0e, ASSIGN because future has completed 2024-12-03T23:30:44,665 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=216, ppid=215, state=RUNNABLE, hasLock=false; OpenRegionProcedure a2ce8b5750bad6679883aabcdedd0e0e, server=ed8014902436,41121,1733268370525}] 2024-12-03T23:30:44,666 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=214, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=5cb7960ee15b899fc682c05cfcfa76be, ASSIGN because future has completed 2024-12-03T23:30:44,667 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=217, ppid=214, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5cb7960ee15b899fc682c05cfcfa76be, server=ed8014902436,39085,1733268370401}] 2024-12-03T23:30:44,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=213 2024-12-03T23:30:44,825 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] handler.AssignRegionHandler(132): Open testtb-testExportWithChecksum,1,1733268644461.a2ce8b5750bad6679883aabcdedd0e0e. 2024-12-03T23:30:44,825 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(7752): Opening region: {ENCODED => a2ce8b5750bad6679883aabcdedd0e0e, NAME => 'testtb-testExportWithChecksum,1,1733268644461.a2ce8b5750bad6679883aabcdedd0e0e.', STARTKEY => '1', ENDKEY => ''} 2024-12-03T23:30:44,826 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] handler.AssignRegionHandler(132): Open testtb-testExportWithChecksum,,1733268644461.5cb7960ee15b899fc682c05cfcfa76be. 2024-12-03T23:30:44,826 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithChecksum,1,1733268644461.a2ce8b5750bad6679883aabcdedd0e0e. service=AccessControlService 2024-12-03T23:30:44,826 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(7752): Opening region: {ENCODED => 5cb7960ee15b899fc682c05cfcfa76be, NAME => 'testtb-testExportWithChecksum,,1733268644461.5cb7960ee15b899fc682c05cfcfa76be.', STARTKEY => '', ENDKEY => '1'} 2024-12-03T23:30:44,827 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-03T23:30:44,827 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithChecksum,,1733268644461.5cb7960ee15b899fc682c05cfcfa76be. service=AccessControlService 2024-12-03T23:30:44,827 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum a2ce8b5750bad6679883aabcdedd0e0e 2024-12-03T23:30:44,827 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,1,1733268644461.a2ce8b5750bad6679883aabcdedd0e0e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T23:30:44,827 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-03T23:30:44,827 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(7794): checking encryption for a2ce8b5750bad6679883aabcdedd0e0e 2024-12-03T23:30:44,827 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(7797): checking classloading for a2ce8b5750bad6679883aabcdedd0e0e 2024-12-03T23:30:44,827 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum 5cb7960ee15b899fc682c05cfcfa76be 2024-12-03T23:30:44,827 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,,1733268644461.5cb7960ee15b899fc682c05cfcfa76be.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T23:30:44,827 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(7794): checking encryption for 5cb7960ee15b899fc682c05cfcfa76be 2024-12-03T23:30:44,827 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(7797): checking classloading for 5cb7960ee15b899fc682c05cfcfa76be 2024-12-03T23:30:44,829 INFO [StoreOpener-a2ce8b5750bad6679883aabcdedd0e0e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region a2ce8b5750bad6679883aabcdedd0e0e 2024-12-03T23:30:44,829 INFO [StoreOpener-5cb7960ee15b899fc682c05cfcfa76be-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 5cb7960ee15b899fc682c05cfcfa76be 2024-12-03T23:30:44,831 INFO [StoreOpener-a2ce8b5750bad6679883aabcdedd0e0e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files 
[minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a2ce8b5750bad6679883aabcdedd0e0e columnFamilyName cf 2024-12-03T23:30:44,831 INFO [StoreOpener-5cb7960ee15b899fc682c05cfcfa76be-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5cb7960ee15b899fc682c05cfcfa76be columnFamilyName cf 2024-12-03T23:30:44,833 DEBUG [StoreOpener-a2ce8b5750bad6679883aabcdedd0e0e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:30:44,833 DEBUG [StoreOpener-5cb7960ee15b899fc682c05cfcfa76be-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:30:44,833 INFO [StoreOpener-5cb7960ee15b899fc682c05cfcfa76be-1 {}] regionserver.HStore(327): Store=5cb7960ee15b899fc682c05cfcfa76be/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T23:30:44,833 INFO [StoreOpener-a2ce8b5750bad6679883aabcdedd0e0e-1 {}] regionserver.HStore(327): Store=a2ce8b5750bad6679883aabcdedd0e0e/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T23:30:44,834 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1038): replaying wal for 5cb7960ee15b899fc682c05cfcfa76be 2024-12-03T23:30:44,834 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1038): replaying wal for a2ce8b5750bad6679883aabcdedd0e0e 2024-12-03T23:30:44,835 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithChecksum/a2ce8b5750bad6679883aabcdedd0e0e 2024-12-03T23:30:44,835 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithChecksum/5cb7960ee15b899fc682c05cfcfa76be 2024-12-03T23:30:44,835 DEBUG 
[RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithChecksum/a2ce8b5750bad6679883aabcdedd0e0e 2024-12-03T23:30:44,835 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithChecksum/5cb7960ee15b899fc682c05cfcfa76be 2024-12-03T23:30:44,835 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1048): stopping wal replay for a2ce8b5750bad6679883aabcdedd0e0e 2024-12-03T23:30:44,836 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1060): Cleaning up temporary data for a2ce8b5750bad6679883aabcdedd0e0e 2024-12-03T23:30:44,836 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1048): stopping wal replay for 5cb7960ee15b899fc682c05cfcfa76be 2024-12-03T23:30:44,836 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1060): Cleaning up temporary data for 5cb7960ee15b899fc682c05cfcfa76be 2024-12-03T23:30:44,837 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1093): writing seq id for a2ce8b5750bad6679883aabcdedd0e0e 2024-12-03T23:30:44,838 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1093): writing seq id for 5cb7960ee15b899fc682c05cfcfa76be 2024-12-03T23:30:44,840 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithChecksum/a2ce8b5750bad6679883aabcdedd0e0e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T23:30:44,840 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithChecksum/5cb7960ee15b899fc682c05cfcfa76be/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T23:30:44,840 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1114): Opened 5cb7960ee15b899fc682c05cfcfa76be; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74027291, jitterRate=0.10309259593486786}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-03T23:30:44,840 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 5cb7960ee15b899fc682c05cfcfa76be 2024-12-03T23:30:44,840 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1114): Opened a2ce8b5750bad6679883aabcdedd0e0e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, 
ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73529779, jitterRate=0.09567908942699432}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-03T23:30:44,840 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1122): Running coprocessor post-open hooks for a2ce8b5750bad6679883aabcdedd0e0e 2024-12-03T23:30:44,841 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1006): Region open journal for a2ce8b5750bad6679883aabcdedd0e0e: Running coprocessor pre-open hook at 1733268644827Writing region info on filesystem at 1733268644827Initializing all the Stores at 1733268644828 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733268644828Cleaning up temporary data from old regions at 1733268644836 (+8 ms)Running coprocessor post-open hooks at 1733268644840 (+4 ms)Region opened successfully at 1733268644841 (+1 ms) 2024-12-03T23:30:44,841 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1006): Region open journal for 5cb7960ee15b899fc682c05cfcfa76be: Running coprocessor pre-open hook at 1733268644827Writing region info on filesystem at 1733268644828 (+1 ms)Initializing all the Stores at 1733268644829 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733268644829Cleaning up temporary data from old regions at 1733268644836 (+7 ms)Running coprocessor post-open hooks at 1733268644840 (+4 ms)Region opened successfully at 1733268644841 (+1 ms) 2024-12-03T23:30:44,842 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithChecksum,1,1733268644461.a2ce8b5750bad6679883aabcdedd0e0e., pid=216, masterSystemTime=1733268644819 2024-12-03T23:30:44,842 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithChecksum,,1733268644461.5cb7960ee15b899fc682c05cfcfa76be., pid=217, masterSystemTime=1733268644820 2024-12-03T23:30:44,843 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithChecksum,1,1733268644461.a2ce8b5750bad6679883aabcdedd0e0e. 2024-12-03T23:30:44,843 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] handler.AssignRegionHandler(153): Opened testtb-testExportWithChecksum,1,1733268644461.a2ce8b5750bad6679883aabcdedd0e0e. 
2024-12-03T23:30:44,844 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=215 updating hbase:meta row=a2ce8b5750bad6679883aabcdedd0e0e, regionState=OPEN, openSeqNum=2, regionLocation=ed8014902436,41121,1733268370525 2024-12-03T23:30:44,844 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithChecksum,,1733268644461.5cb7960ee15b899fc682c05cfcfa76be. 2024-12-03T23:30:44,844 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] handler.AssignRegionHandler(153): Opened testtb-testExportWithChecksum,,1733268644461.5cb7960ee15b899fc682c05cfcfa76be. 2024-12-03T23:30:44,845 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=214 updating hbase:meta row=5cb7960ee15b899fc682c05cfcfa76be, regionState=OPEN, openSeqNum=2, regionLocation=ed8014902436,39085,1733268370401 2024-12-03T23:30:44,845 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=216, ppid=215, state=RUNNABLE, hasLock=false; OpenRegionProcedure a2ce8b5750bad6679883aabcdedd0e0e, server=ed8014902436,41121,1733268370525 because future has completed 2024-12-03T23:30:44,846 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=217, ppid=214, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5cb7960ee15b899fc682c05cfcfa76be, server=ed8014902436,39085,1733268370401 because future has completed 2024-12-03T23:30:44,847 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=216, resume processing ppid=215 2024-12-03T23:30:44,847 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=216, ppid=215, state=SUCCESS, hasLock=false; OpenRegionProcedure a2ce8b5750bad6679883aabcdedd0e0e, server=ed8014902436,41121,1733268370525 in 181 msec 2024-12-03T23:30:44,848 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=217, resume processing ppid=214 2024-12-03T23:30:44,848 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=217, ppid=214, state=SUCCESS, hasLock=false; OpenRegionProcedure 5cb7960ee15b899fc682c05cfcfa76be, server=ed8014902436,39085,1733268370401 in 179 msec 2024-12-03T23:30:44,848 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=215, ppid=213, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=a2ce8b5750bad6679883aabcdedd0e0e, ASSIGN in 339 msec 2024-12-03T23:30:44,849 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=214, resume processing ppid=213 2024-12-03T23:30:44,849 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=214, ppid=213, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=5cb7960ee15b899fc682c05cfcfa76be, ASSIGN in 340 msec 2024-12-03T23:30:44,849 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-03T23:30:44,849 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733268644849"}]},"ts":"1733268644849"} 2024-12-03T23:30:44,851 INFO 
[PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=ENABLED in hbase:meta 2024-12-03T23:30:44,851 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_POST_OPERATION 2024-12-03T23:30:44,851 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithChecksum jenkins: RWXCA 2024-12-03T23:30:44,854 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37351 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-12-03T23:30:44,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:30:44,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:30:44,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:30:44,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:30:44,919 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-03T23:30:44,919 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-03T23:30:44,919 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-03T23:30:44,919 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-03T23:30:44,919 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-03T23:30:44,920 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-03T23:30:44,920 DEBUG [zk-permission-watcher-pool-0 {}] 
access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-03T23:30:44,920 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-03T23:30:44,922 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=213, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithChecksum in 455 msec 2024-12-03T23:30:45,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=213 2024-12-03T23:30:45,091 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithChecksum completed 2024-12-03T23:30:45,091 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-03T23:30:45,096 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithChecksum 2024-12-03T23:30:45,096 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithChecksum,,1733268644461.5cb7960ee15b899fc682c05cfcfa76be. 2024-12-03T23:30:45,097 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-03T23:30:45,098 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-03T23:30:45,103 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-03T23:30:45,110 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-03T23:30:45,114 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-03T23:30:45,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733268645114 (current time:1733268645114). 
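[editor's note] The master log above records a FLUSH-type snapshot request ({ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }). For reference, a hedged sketch of issuing the equivalent request through the standard Admin API — the Admin handle is assumed to be open, and this is not the code path the test uses internally:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.SnapshotDescription;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class SnapshotSketch {
      // Requests a flush-based snapshot of the table, as logged by MasterRpcServices above.
      static void takeEmptySnapshot(Admin admin) throws Exception {
        admin.snapshot(new SnapshotDescription(
            "emptySnaptb0-testExportWithChecksum",
            TableName.valueOf("testtb-testExportWithChecksum"),
            SnapshotType.FLUSH));
      }
    }
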
2024-12-03T23:30:45,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-03T23:30:45,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithChecksum VERSION not specified, setting to 2 2024-12-03T23:30:45,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-03T23:30:45,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@76fb2975, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:30:45,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.ClusterIdFetcher(90): Going to request ed8014902436,43753,-1 for getting cluster id 2024-12-03T23:30:45,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T23:30:45,116 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '365bf15c-9cbe-4838-a767-ac7274dcdbe4' 2024-12-03T23:30:45,116 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T23:30:45,116 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "365bf15c-9cbe-4838-a767-ac7274dcdbe4" 2024-12-03T23:30:45,116 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@282347e0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:30:45,116 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [ed8014902436,43753,-1] 2024-12-03T23:30:45,116 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T23:30:45,117 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:30:45,118 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33664, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T23:30:45,118 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7335ac08, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:30:45,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T23:30:45,120 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 
{}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ed8014902436,37351,1733268370600, seqNum=-1] 2024-12-03T23:30:45,120 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T23:30:45,121 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49362, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T23:30:45,122 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753. 2024-12-03T23:30:45,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-03T23:30:45,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:30:45,122 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-03T23:30:45,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:30:45,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11e721a8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:30:45,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.ClusterIdFetcher(90): Going to request ed8014902436,43753,-1 for getting cluster id 2024-12-03T23:30:45,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T23:30:45,124 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '365bf15c-9cbe-4838-a767-ac7274dcdbe4' 2024-12-03T23:30:45,124 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T23:30:45,124 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "365bf15c-9cbe-4838-a767-ac7274dcdbe4" 2024-12-03T23:30:45,124 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5d274c26, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:30:45,124 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [ed8014902436,43753,-1] 2024-12-03T23:30:45,124 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T23:30:45,124 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:30:45,125 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33686, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T23:30:45,125 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@553a7f6f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:30:45,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T23:30:45,126 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ed8014902436,37351,1733268370600, seqNum=-1] 2024-12-03T23:30:45,127 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T23:30:45,127 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] 
ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49376, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T23:30:45,129 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithChecksum', locateType=CURRENT is [region=hbase:acl,,1733268373530.94ece278d222af9bae70064026dc24ae., hostname=ed8014902436,37351,1733268370600, seqNum=2] 2024-12-03T23:30:45,130 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753. 2024-12-03T23:30:45,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor256.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-03T23:30:45,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:30:45,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:30:45,130 INFO 
[Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-03T23:30:45,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-12-03T23:30:45,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-03T23:30:45,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure2.ProcedureExecutor(1139): Stored pid=218, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-03T23:30:45,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 218 2024-12-03T23:30:45,132 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-03T23:30:45,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=218 2024-12-03T23:30:45,133 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-03T23:30:45,135 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-03T23:30:45,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742326_1502 (size=161) 2024-12-03T23:30:45,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742326_1502 (size=161) 2024-12-03T23:30:45,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742326_1502 (size=161) 2024-12-03T23:30:45,140 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-03T23:30:45,141 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=219, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5cb7960ee15b899fc682c05cfcfa76be}, {pid=220, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 
a2ce8b5750bad6679883aabcdedd0e0e}] 2024-12-03T23:30:45,141 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=220, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a2ce8b5750bad6679883aabcdedd0e0e 2024-12-03T23:30:45,141 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=219, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5cb7960ee15b899fc682c05cfcfa76be 2024-12-03T23:30:45,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=218 2024-12-03T23:30:45,293 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41121 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=220 2024-12-03T23:30:45,293 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39085 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=219 2024-12-03T23:30:45,294 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,1,1733268644461.a2ce8b5750bad6679883aabcdedd0e0e. 2024-12-03T23:30:45,294 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,,1733268644461.5cb7960ee15b899fc682c05cfcfa76be. 2024-12-03T23:30:45,294 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.HRegion(2603): Flush status journal for a2ce8b5750bad6679883aabcdedd0e0e: 2024-12-03T23:30:45,294 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,1,1733268644461.a2ce8b5750bad6679883aabcdedd0e0e. for emptySnaptb0-testExportWithChecksum completed. 2024-12-03T23:30:45,294 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.HRegion(2603): Flush status journal for 5cb7960ee15b899fc682c05cfcfa76be: 2024-12-03T23:30:45,295 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,,1733268644461.5cb7960ee15b899fc682c05cfcfa76be. for emptySnaptb0-testExportWithChecksum completed. 2024-12-03T23:30:45,295 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,1,1733268644461.a2ce8b5750bad6679883aabcdedd0e0e.' 
region-info for snapshot=emptySnaptb0-testExportWithChecksum 2024-12-03T23:30:45,295 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-03T23:30:45,295 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-03T23:30:45,295 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,,1733268644461.5cb7960ee15b899fc682c05cfcfa76be.' region-info for snapshot=emptySnaptb0-testExportWithChecksum 2024-12-03T23:30:45,295 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-03T23:30:45,295 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-03T23:30:45,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742327_1503 (size=68) 2024-12-03T23:30:45,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742327_1503 (size=68) 2024-12-03T23:30:45,304 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,1,1733268644461.a2ce8b5750bad6679883aabcdedd0e0e. 2024-12-03T23:30:45,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742327_1503 (size=68) 2024-12-03T23:30:45,304 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=220 2024-12-03T23:30:45,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742328_1504 (size=68) 2024-12-03T23:30:45,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742328_1504 (size=68) 2024-12-03T23:30:45,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742328_1504 (size=68) 2024-12-03T23:30:45,304 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,,1733268644461.5cb7960ee15b899fc682c05cfcfa76be. 
2024-12-03T23:30:45,304 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=219 2024-12-03T23:30:45,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.HMaster(4169): Remote procedure done, pid=220 2024-12-03T23:30:45,304 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithChecksum on region a2ce8b5750bad6679883aabcdedd0e0e 2024-12-03T23:30:45,304 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=220, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a2ce8b5750bad6679883aabcdedd0e0e 2024-12-03T23:30:45,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.HMaster(4169): Remote procedure done, pid=219 2024-12-03T23:30:45,304 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithChecksum on region 5cb7960ee15b899fc682c05cfcfa76be 2024-12-03T23:30:45,305 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=219, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5cb7960ee15b899fc682c05cfcfa76be 2024-12-03T23:30:45,306 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=219, ppid=218, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 5cb7960ee15b899fc682c05cfcfa76be in 164 msec 2024-12-03T23:30:45,307 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=220, resume processing ppid=218 2024-12-03T23:30:45,307 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=220, ppid=218, state=SUCCESS, hasLock=false; SnapshotRegionProcedure a2ce8b5750bad6679883aabcdedd0e0e in 164 msec 2024-12-03T23:30:45,307 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-03T23:30:45,307 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-03T23:30:45,308 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-03T23:30:45,308 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-03T23:30:45,308 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:30:45,309 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-03T23:30:45,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742329_1505 (size=60) 2024-12-03T23:30:45,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742329_1505 (size=60) 2024-12-03T23:30:45,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742329_1505 (size=60) 2024-12-03T23:30:45,324 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-03T23:30:45,324 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithChecksum 2024-12-03T23:30:45,325 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithChecksum 2024-12-03T23:30:45,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742330_1506 (size=641) 2024-12-03T23:30:45,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742330_1506 (size=641) 2024-12-03T23:30:45,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742330_1506 (size=641) 2024-12-03T23:30:45,337 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-03T23:30:45,343 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-03T23:30:45,343 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithChecksum to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/emptySnaptb0-testExportWithChecksum 2024-12-03T23:30:45,344 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_POST_OPERATION, 
hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-03T23:30:45,344 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 218 2024-12-03T23:30:45,346 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=218, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } in 213 msec 2024-12-03T23:30:45,422 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-03T23:30:45,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=218 2024-12-03T23:30:45,450 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum completed 2024-12-03T23:30:45,454 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39085 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithChecksum,,1733268644461.5cb7960ee15b899fc682c05cfcfa76be. with WAL disabled. Data may be lost in the event of a crash. 2024-12-03T23:30:45,455 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41121 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithChecksum,1,1733268644461.a2ce8b5750bad6679883aabcdedd0e0e. with WAL disabled. Data may be lost in the event of a crash. 2024-12-03T23:30:45,456 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-03T23:30:45,458 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithChecksum 2024-12-03T23:30:45,458 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithChecksum,,1733268644461.5cb7960ee15b899fc682c05cfcfa76be. 
2024-12-03T23:30:45,458 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-03T23:30:45,459 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-03T23:30:45,462 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-03T23:30:45,467 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-03T23:30:45,469 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-03T23:30:45,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733268645469 (current time:1733268645469). 2024-12-03T23:30:45,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-03T23:30:45,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithChecksum VERSION not specified, setting to 2 2024-12-03T23:30:45,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-03T23:30:45,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@195a4b11, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:30:45,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.ClusterIdFetcher(90): Going to request ed8014902436,43753,-1 for getting cluster id 2024-12-03T23:30:45,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T23:30:45,470 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '365bf15c-9cbe-4838-a767-ac7274dcdbe4' 2024-12-03T23:30:45,470 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T23:30:45,470 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "365bf15c-9cbe-4838-a767-ac7274dcdbe4" 2024-12-03T23:30:45,470 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3bcc1bb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 
2024-12-03T23:30:45,470 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [ed8014902436,43753,-1] 2024-12-03T23:30:45,470 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T23:30:45,471 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:30:45,471 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33708, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T23:30:45,472 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@588e8379, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:30:45,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T23:30:45,472 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ed8014902436,37351,1733268370600, seqNum=-1] 2024-12-03T23:30:45,473 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T23:30:45,474 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49388, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T23:30:45,474 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753. 
2024-12-03T23:30:45,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-03T23:30:45,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:30:45,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:30:45,475 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-03T23:30:45,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@64f5ff, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:30:45,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.ClusterIdFetcher(90): Going to request ed8014902436,43753,-1 for getting cluster id 2024-12-03T23:30:45,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T23:30:45,476 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '365bf15c-9cbe-4838-a767-ac7274dcdbe4' 2024-12-03T23:30:45,476 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T23:30:45,476 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "365bf15c-9cbe-4838-a767-ac7274dcdbe4" 2024-12-03T23:30:45,476 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@57c5447a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:30:45,476 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [ed8014902436,43753,-1] 2024-12-03T23:30:45,476 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T23:30:45,476 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:30:45,477 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33720, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T23:30:45,477 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6806c2cd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:30:45,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T23:30:45,478 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ed8014902436,37351,1733268370600, seqNum=-1] 2024-12-03T23:30:45,478 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T23:30:45,479 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49400, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T23:30:45,480 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithChecksum', locateType=CURRENT is [region=hbase:acl,,1733268373530.94ece278d222af9bae70064026dc24ae., hostname=ed8014902436,37351,1733268370600, seqNum=2] 2024-12-03T23:30:45,481 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753. 
2024-12-03T23:30:45,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor256.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-03T23:30:45,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:30:45,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:30:45,482 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-03T23:30:45,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-12-03T23:30:45,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-12-03T23:30:45,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure2.ProcedureExecutor(1139): Stored pid=221, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-03T23:30:45,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 221 2024-12-03T23:30:45,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=221 2024-12-03T23:30:45,484 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-03T23:30:45,485 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-03T23:30:45,486 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-03T23:30:45,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742331_1507 (size=156) 2024-12-03T23:30:45,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742331_1507 (size=156) 2024-12-03T23:30:45,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742331_1507 (size=156) 2024-12-03T23:30:45,492 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-03T23:30:45,492 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=222, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5cb7960ee15b899fc682c05cfcfa76be}, {pid=223, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a2ce8b5750bad6679883aabcdedd0e0e}] 2024-12-03T23:30:45,492 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=223, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a2ce8b5750bad6679883aabcdedd0e0e 2024-12-03T23:30:45,492 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=222, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5cb7960ee15b899fc682c05cfcfa76be 2024-12-03T23:30:45,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 
{}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=221 2024-12-03T23:30:45,645 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41121 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=223 2024-12-03T23:30:45,645 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39085 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=222 2024-12-03T23:30:45,645 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,,1733268644461.5cb7960ee15b899fc682c05cfcfa76be. 2024-12-03T23:30:45,645 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,1,1733268644461.a2ce8b5750bad6679883aabcdedd0e0e. 2024-12-03T23:30:45,646 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HRegion(2902): Flushing a2ce8b5750bad6679883aabcdedd0e0e 1/1 column families, dataSize=2.93 KB heapSize=6.58 KB 2024-12-03T23:30:45,646 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HRegion(2902): Flushing 5cb7960ee15b899fc682c05cfcfa76be 1/1 column families, dataSize=333 B heapSize=976 B 2024-12-03T23:30:45,668 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203656047715ed7412186c94036ba514ccc_5cb7960ee15b899fc682c05cfcfa76be is 71, key is 02eb0691cdbdfe67d59d9d028d5885b2/cf:q/1733268645454/Put/seqid=0 2024-12-03T23:30:45,668 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241203e8d8894ff75b4eedb0440aa05441b8f0_a2ce8b5750bad6679883aabcdedd0e0e is 71, key is 1a0702222d8390e434b37a5fd430ee6f/cf:q/1733268645455/Put/seqid=0 2024-12-03T23:30:45,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742332_1508 (size=5241) 2024-12-03T23:30:45,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742333_1509 (size=8032) 2024-12-03T23:30:45,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742333_1509 (size=8032) 2024-12-03T23:30:45,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742333_1509 (size=8032) 2024-12-03T23:30:45,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742332_1508 (size=5241) 2024-12-03T23:30:45,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to 
blk_1073742332_1508 (size=5241) 2024-12-03T23:30:45,678 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:30:45,679 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:30:45,682 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241203e8d8894ff75b4eedb0440aa05441b8f0_a2ce8b5750bad6679883aabcdedd0e0e to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241203e8d8894ff75b4eedb0440aa05441b8f0_a2ce8b5750bad6679883aabcdedd0e0e 2024-12-03T23:30:45,683 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203656047715ed7412186c94036ba514ccc_5cb7960ee15b899fc682c05cfcfa76be to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e20241203656047715ed7412186c94036ba514ccc_5cb7960ee15b899fc682c05cfcfa76be 2024-12-03T23:30:45,683 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithChecksum/a2ce8b5750bad6679883aabcdedd0e0e/.tmp/cf/a22020723917458497acef23688b0740, store: [table=testtb-testExportWithChecksum family=cf region=a2ce8b5750bad6679883aabcdedd0e0e] 2024-12-03T23:30:45,683 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithChecksum/5cb7960ee15b899fc682c05cfcfa76be/.tmp/cf/5eca9104720d4e358ba2e42844ff7f8b, store: [table=testtb-testExportWithChecksum family=cf region=5cb7960ee15b899fc682c05cfcfa76be] 2024-12-03T23:30:45,684 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithChecksum/a2ce8b5750bad6679883aabcdedd0e0e/.tmp/cf/a22020723917458497acef23688b0740 is 206, key is 19ac99837d4df65ae7f295315a1c4e8ab/cf:q/1733268645455/Put/seqid=0 2024-12-03T23:30:45,684 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithChecksum/5cb7960ee15b899fc682c05cfcfa76be/.tmp/cf/5eca9104720d4e358ba2e42844ff7f8b is 206, key is 06f8da6a7429c58b3e5c09179e468025c/cf:q/1733268645454/Put/seqid=0 2024-12-03T23:30:45,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742334_1510 (size=14451) 2024-12-03T23:30:45,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742334_1510 (size=14451) 2024-12-03T23:30:45,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742334_1510 (size=14451) 2024-12-03T23:30:45,694 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=2.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithChecksum/a2ce8b5750bad6679883aabcdedd0e0e/.tmp/cf/a22020723917458497acef23688b0740 2024-12-03T23:30:45,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742335_1511 (size=6308) 2024-12-03T23:30:45,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742335_1511 (size=6308) 2024-12-03T23:30:45,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742335_1511 (size=6308) 2024-12-03T23:30:45,696 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=333, hasBloomFilter=true, into tmp file hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithChecksum/5cb7960ee15b899fc682c05cfcfa76be/.tmp/cf/5eca9104720d4e358ba2e42844ff7f8b 2024-12-03T23:30:45,699 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithChecksum/a2ce8b5750bad6679883aabcdedd0e0e/.tmp/cf/a22020723917458497acef23688b0740 as hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithChecksum/a2ce8b5750bad6679883aabcdedd0e0e/cf/a22020723917458497acef23688b0740 2024-12-03T23:30:45,700 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithChecksum/5cb7960ee15b899fc682c05cfcfa76be/.tmp/cf/5eca9104720d4e358ba2e42844ff7f8b as hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithChecksum/5cb7960ee15b899fc682c05cfcfa76be/cf/5eca9104720d4e358ba2e42844ff7f8b 2024-12-03T23:30:45,703 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithChecksum/a2ce8b5750bad6679883aabcdedd0e0e/cf/a22020723917458497acef23688b0740, entries=45, sequenceid=6, filesize=14.1 K 2024-12-03T23:30:45,704 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HRegion(3140): Finished flush of dataSize ~2.93 KB/3003, heapSize ~6.56 KB/6720, currentSize=0 B/0 for a2ce8b5750bad6679883aabcdedd0e0e in 59ms, sequenceid=6, compaction requested=false 2024-12-03T23:30:45,704 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithChecksum' 2024-12-03T23:30:45,704 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HRegion(2603): Flush status journal for a2ce8b5750bad6679883aabcdedd0e0e: 2024-12-03T23:30:45,704 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,1,1733268644461.a2ce8b5750bad6679883aabcdedd0e0e. for snaptb0-testExportWithChecksum completed. 2024-12-03T23:30:45,705 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,1,1733268644461.a2ce8b5750bad6679883aabcdedd0e0e.' region-info for snapshot=snaptb0-testExportWithChecksum 2024-12-03T23:30:45,705 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-03T23:30:45,705 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithChecksum/a2ce8b5750bad6679883aabcdedd0e0e/cf/a22020723917458497acef23688b0740] hfiles 2024-12-03T23:30:45,705 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithChecksum/a2ce8b5750bad6679883aabcdedd0e0e/cf/a22020723917458497acef23688b0740 for snapshot=snaptb0-testExportWithChecksum 2024-12-03T23:30:45,705 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithChecksum/5cb7960ee15b899fc682c05cfcfa76be/cf/5eca9104720d4e358ba2e42844ff7f8b, entries=5, sequenceid=6, filesize=6.2 K 2024-12-03T23:30:45,705 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HRegion(3140): Finished flush of dataSize ~333 B/333, heapSize ~960 B/960, currentSize=0 B/0 for 5cb7960ee15b899fc682c05cfcfa76be in 60ms, sequenceid=6, compaction requested=false 2024-12-03T23:30:45,706 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HRegion(2603): Flush 
status journal for 5cb7960ee15b899fc682c05cfcfa76be: 2024-12-03T23:30:45,706 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,,1733268644461.5cb7960ee15b899fc682c05cfcfa76be. for snaptb0-testExportWithChecksum completed. 2024-12-03T23:30:45,706 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,,1733268644461.5cb7960ee15b899fc682c05cfcfa76be.' region-info for snapshot=snaptb0-testExportWithChecksum 2024-12-03T23:30:45,706 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-03T23:30:45,706 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithChecksum/5cb7960ee15b899fc682c05cfcfa76be/cf/5eca9104720d4e358ba2e42844ff7f8b] hfiles 2024-12-03T23:30:45,706 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithChecksum/5cb7960ee15b899fc682c05cfcfa76be/cf/5eca9104720d4e358ba2e42844ff7f8b for snapshot=snaptb0-testExportWithChecksum 2024-12-03T23:30:45,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742336_1512 (size=107) 2024-12-03T23:30:45,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742337_1513 (size=107) 2024-12-03T23:30:45,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742336_1512 (size=107) 2024-12-03T23:30:45,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742336_1512 (size=107) 2024-12-03T23:30:45,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742337_1513 (size=107) 2024-12-03T23:30:45,711 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,1,1733268644461.a2ce8b5750bad6679883aabcdedd0e0e. 2024-12-03T23:30:45,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742337_1513 (size=107) 2024-12-03T23:30:45,711 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=223 2024-12-03T23:30:45,711 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,,1733268644461.5cb7960ee15b899fc682c05cfcfa76be. 
2024-12-03T23:30:45,711 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=222 2024-12-03T23:30:45,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.HMaster(4169): Remote procedure done, pid=223 2024-12-03T23:30:45,711 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithChecksum on region a2ce8b5750bad6679883aabcdedd0e0e 2024-12-03T23:30:45,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.HMaster(4169): Remote procedure done, pid=222 2024-12-03T23:30:45,711 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithChecksum on region 5cb7960ee15b899fc682c05cfcfa76be 2024-12-03T23:30:45,711 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=223, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a2ce8b5750bad6679883aabcdedd0e0e 2024-12-03T23:30:45,712 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=222, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5cb7960ee15b899fc682c05cfcfa76be 2024-12-03T23:30:45,713 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=223, ppid=221, state=SUCCESS, hasLock=false; SnapshotRegionProcedure a2ce8b5750bad6679883aabcdedd0e0e in 220 msec 2024-12-03T23:30:45,714 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=222, resume processing ppid=221 2024-12-03T23:30:45,714 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=222, ppid=221, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 5cb7960ee15b899fc682c05cfcfa76be in 220 msec 2024-12-03T23:30:45,714 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-03T23:30:45,714 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-03T23:30:45,715 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-03T23:30:45,715 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-03T23:30:45,715 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:30:45,716 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241203e8d8894ff75b4eedb0440aa05441b8f0_a2ce8b5750bad6679883aabcdedd0e0e, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e20241203656047715ed7412186c94036ba514ccc_5cb7960ee15b899fc682c05cfcfa76be] hfiles 2024-12-03T23:30:45,716 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241203e8d8894ff75b4eedb0440aa05441b8f0_a2ce8b5750bad6679883aabcdedd0e0e 2024-12-03T23:30:45,716 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e20241203656047715ed7412186c94036ba514ccc_5cb7960ee15b899fc682c05cfcfa76be 2024-12-03T23:30:45,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742338_1514 (size=291) 2024-12-03T23:30:45,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742338_1514 (size=291) 2024-12-03T23:30:45,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742338_1514 (size=291) 2024-12-03T23:30:45,722 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-03T23:30:45,722 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithChecksum 2024-12-03T23:30:45,723 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-03T23:30:45,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742339_1515 (size=951) 2024-12-03T23:30:45,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742339_1515 (size=951) 2024-12-03T23:30:45,730 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742339_1515 (size=951) 2024-12-03T23:30:45,731 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-03T23:30:45,736 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-03T23:30:45,736 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-12-03T23:30:45,737 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-03T23:30:45,737 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 221 2024-12-03T23:30:45,738 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=221, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } in 255 msec 2024-12-03T23:30:45,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=221 2024-12-03T23:30:45,799 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum completed 2024-12-03T23:30:45,799 INFO [Time-limited test {}] snapshot.TestExportSnapshot(523): Local export destination path: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/local-export-1733268645799 2024-12-03T23:30:45,800 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/local-export-1733268645799, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/local-export-1733268645799, srcFsUri=hdfs://localhost:37331, srcDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b 2024-12-03T23:30:45,825 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:37331, inputRoot=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b 
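[editor's note] At this point the snapshot is complete and the test starts ExportSnapshot, copying from the HDFS source (hdfs://localhost:37331) to a local file:/ destination. As a hedged sketch under the assumption that ExportSnapshot can be driven through Hadoop's ToolRunner (it extends AbstractHBaseTool), the following shows a standalone invocation with the same snapshot name; the destination path is a placeholder, not the test's path.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Roughly equivalent to the CLI form:
    //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
    //     -snapshot snaptb0-testExportWithChecksum -copy-to file:///tmp/local-export
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportWithChecksum",
        "-copy-to", "file:///tmp/local-export"   // placeholder destination
    });
    System.exit(rc);
  }
}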
2024-12-03T23:30:45,825 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=org.apache.hadoop.fs.LocalFileSystem@3a8441fb, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/local-export-1733268645799, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/local-export-1733268645799/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-03T23:30:45,827 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-03T23:30:45,830 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/snaptb0-testExportWithChecksum to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/local-export-1733268645799/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-03T23:30:45,849 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:30:45,849 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:30:45,849 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:30:46,674 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/hadoop-9781338850434288013.jar 2024-12-03T23:30:46,674 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:30:46,675 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:30:46,728 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/hadoop-12533462050587331069.jar 2024-12-03T23:30:46,728 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:30:46,728 DEBUG 
[Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:30:46,728 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:30:46,729 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:30:46,729 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:30:46,729 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:30:46,729 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-03T23:30:46,730 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-03T23:30:46,730 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-03T23:30:46,730 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-03T23:30:46,730 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-03T23:30:46,731 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-03T23:30:46,731 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-03T23:30:46,731 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-03T23:30:46,731 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-03T23:30:46,731 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-03T23:30:46,732 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-03T23:30:46,732 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-03T23:30:46,732 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-03T23:30:46,732 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-03T23:30:46,732 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-03T23:30:46,732 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-03T23:30:46,733 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-03T23:30:46,733 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-03T23:30:46,774 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742340_1516 (size=131440) 2024-12-03T23:30:46,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742340_1516 (size=131440) 2024-12-03T23:30:46,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742340_1516 (size=131440) 2024-12-03T23:30:46,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742341_1517 (size=6424741) 2024-12-03T23:30:46,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742341_1517 (size=6424741) 2024-12-03T23:30:46,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742341_1517 (size=6424741) 2024-12-03T23:30:46,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742342_1518 (size=4188619) 2024-12-03T23:30:46,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742342_1518 (size=4188619) 2024-12-03T23:30:46,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742342_1518 (size=4188619) 2024-12-03T23:30:46,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742343_1519 (size=1323991) 2024-12-03T23:30:46,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742343_1519 (size=1323991) 2024-12-03T23:30:46,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742343_1519 (size=1323991) 2024-12-03T23:30:46,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742344_1520 (size=903924) 2024-12-03T23:30:46,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742344_1520 (size=903924) 2024-12-03T23:30:46,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742344_1520 (size=903924) 2024-12-03T23:30:47,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742345_1521 (size=8360360) 2024-12-03T23:30:47,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742345_1521 (size=8360360) 2024-12-03T23:30:47,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742345_1521 (size=8360360) 2024-12-03T23:30:47,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742346_1522 (size=1877034) 2024-12-03T23:30:47,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742346_1522 (size=1877034) 2024-12-03T23:30:47,059 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742346_1522 (size=1877034) 2024-12-03T23:30:47,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742347_1523 (size=443172) 2024-12-03T23:30:47,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742347_1523 (size=443172) 2024-12-03T23:30:47,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742347_1523 (size=443172) 2024-12-03T23:30:47,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742348_1524 (size=77835) 2024-12-03T23:30:47,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742348_1524 (size=77835) 2024-12-03T23:30:47,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742348_1524 (size=77835) 2024-12-03T23:30:47,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742349_1525 (size=30949) 2024-12-03T23:30:47,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742349_1525 (size=30949) 2024-12-03T23:30:47,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742349_1525 (size=30949) 2024-12-03T23:30:47,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742350_1526 (size=1597213) 2024-12-03T23:30:47,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742350_1526 (size=1597213) 2024-12-03T23:30:47,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742350_1526 (size=1597213) 2024-12-03T23:30:47,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742351_1527 (size=4695811) 2024-12-03T23:30:47,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742351_1527 (size=4695811) 2024-12-03T23:30:47,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742351_1527 (size=4695811) 2024-12-03T23:30:47,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742352_1528 (size=232957) 2024-12-03T23:30:47,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742352_1528 (size=232957) 2024-12-03T23:30:47,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742352_1528 (size=232957) 2024-12-03T23:30:47,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742353_1529 (size=127628) 2024-12-03T23:30:47,161 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742353_1529 (size=127628) 2024-12-03T23:30:47,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742353_1529 (size=127628) 2024-12-03T23:30:47,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742354_1530 (size=20406) 2024-12-03T23:30:47,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742354_1530 (size=20406) 2024-12-03T23:30:47,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742354_1530 (size=20406) 2024-12-03T23:30:47,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742355_1531 (size=5175431) 2024-12-03T23:30:47,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742355_1531 (size=5175431) 2024-12-03T23:30:47,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742355_1531 (size=5175431) 2024-12-03T23:30:47,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742356_1532 (size=217634) 2024-12-03T23:30:47,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742356_1532 (size=217634) 2024-12-03T23:30:47,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742356_1532 (size=217634) 2024-12-03T23:30:47,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742357_1533 (size=1832290) 2024-12-03T23:30:47,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742357_1533 (size=1832290) 2024-12-03T23:30:47,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742357_1533 (size=1832290) 2024-12-03T23:30:47,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742358_1534 (size=322274) 2024-12-03T23:30:47,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742358_1534 (size=322274) 2024-12-03T23:30:47,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742358_1534 (size=322274) 2024-12-03T23:30:47,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742359_1535 (size=503880) 2024-12-03T23:30:47,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742359_1535 (size=503880) 2024-12-03T23:30:47,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742359_1535 (size=503880) 
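[editor's note] The long run of "For class X, using jar Y" DEBUG lines, followed by the block reports above, corresponds to TableMapReduceUtil resolving and shipping the HBase/Hadoop dependency jars into the job's distributed cache before the export job is submitted. A minimal sketch of the call that produces that resolution is below; the job name and driver class are hypothetical, and this assumes an otherwise standard MapReduce Job setup.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class DependencyJarsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "snapshot-export-sketch");
    // Finds the jar providing each required class (HConstants, ClientProtos, ...)
    // and adds it to the job's tmpjars, yielding the "For class ..., using jar ..." lines.
    TableMapReduceUtil.addDependencyJars(job);
  }
}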
2024-12-03T23:30:47,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742360_1536 (size=29229) 2024-12-03T23:30:47,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742360_1536 (size=29229) 2024-12-03T23:30:47,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742360_1536 (size=29229) 2024-12-03T23:30:47,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742361_1537 (size=24096) 2024-12-03T23:30:47,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742361_1537 (size=24096) 2024-12-03T23:30:47,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742361_1537 (size=24096) 2024-12-03T23:30:47,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742362_1538 (size=111872) 2024-12-03T23:30:47,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742362_1538 (size=111872) 2024-12-03T23:30:47,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742362_1538 (size=111872) 2024-12-03T23:30:47,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742363_1539 (size=45609) 2024-12-03T23:30:47,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742363_1539 (size=45609) 2024-12-03T23:30:47,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742363_1539 (size=45609) 2024-12-03T23:30:47,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742364_1540 (size=136454) 2024-12-03T23:30:47,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742364_1540 (size=136454) 2024-12-03T23:30:47,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742364_1540 (size=136454) 2024-12-03T23:30:47,259 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
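[editor's note] The JobResourceUploader warning above ("No job jar file set. User classes may not be found.") appears when a job is submitted without a job jar. In an ordinary driver it is avoided by pointing the job at the jar containing the user classes, as its own message ("See Job or Job#setJar(String)") suggests. A short sketch with a hypothetical driver class and a placeholder jar path:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;

public class JobJarSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Job job = Job.getInstance(conf, "export-with-job-jar");
    // Either call removes the "No job jar file set" warning:
    job.setJarByClass(JobJarSketch.class);     // infer the jar from a class contained in it
    // job.setJar("/path/to/my-job.jar");      // or set an explicit jar path (placeholder)
  }
}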
2024-12-03T23:30:47,261 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list 2024-12-03T23:30:47,262 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=14.1 K 2024-12-03T23:30:47,262 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=7.8 K 2024-12-03T23:30:47,262 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=6.2 K 2024-12-03T23:30:47,262 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=5.1 K 2024-12-03T23:30:47,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742365_1541 (size=1023) 2024-12-03T23:30:47,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742365_1541 (size=1023) 2024-12-03T23:30:47,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742365_1541 (size=1023) 2024-12-03T23:30:47,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742366_1542 (size=35) 2024-12-03T23:30:47,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742366_1542 (size=35) 2024-12-03T23:30:47,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742366_1542 (size=35) 2024-12-03T23:30:47,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742367_1543 (size=304042) 2024-12-03T23:30:47,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742367_1543 (size=304042) 2024-12-03T23:30:47,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742367_1543 (size=304042) 2024-12-03T23:30:48,260 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-03T23:30:48,260 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-03T23:30:48,263 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0008_000001 (auth:SIMPLE) from 127.0.0.1:39802 2024-12-03T23:30:48,273 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_0/usercache/jenkins/appcache/application_1733268377251_0008/container_1733268377251_0008_01_000001/launch_container.sh] 2024-12-03T23:30:48,273 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_0/usercache/jenkins/appcache/application_1733268377251_0008/container_1733268377251_0008_01_000001/container_tokens] 2024-12-03T23:30:48,273 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_0/usercache/jenkins/appcache/application_1733268377251_0008/container_1733268377251_0008_01_000001/sysfs] 2024-12-03T23:30:48,531 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0009_000001 (auth:SIMPLE) from 127.0.0.1:46980 2024-12-03T23:30:50,329 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-03T23:30:50,366 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum 2024-12-03T23:30:50,366 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum Metrics about Tables on a single HBase RegionServer 2024-12-03T23:30:50,367 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState 2024-12-03T23:30:53,113 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0009_000001 (auth:SIMPLE) from 127.0.0.1:42820 2024-12-03T23:30:53,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742368_1544 (size=349740) 2024-12-03T23:30:53,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742368_1544 (size=349740) 2024-12-03T23:30:53,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742368_1544 (size=349740) 2024-12-03T23:30:55,364 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0009_000001 (auth:SIMPLE) from 127.0.0.1:52450 2024-12-03T23:30:55,364 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): 
Auth successful for appattempt_1733268377251_0009_000001 (auth:SIMPLE) from 127.0.0.1:34596 2024-12-03T23:30:55,869 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-03T23:30:56,234 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0009_000001 (auth:SIMPLE) from 127.0.0.1:52462 2024-12-03T23:30:56,239 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0009_000001 (auth:SIMPLE) from 127.0.0.1:34606 2024-12-03T23:30:58,263 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733268377251_0009_01_000006 while processing FINISH_CONTAINERS event Error: java.io.IOException: Checksum mismatch between hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241203e8d8894ff75b4eedb0440aa05441b8f0_a2ce8b5750bad6679883aabcdedd0e0e and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/local-export-1733268645799/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241203e8d8894ff75b4eedb0440aa05441b8f0_a2ce8b5750bad6679883aabcdedd0e0e. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-03T23:31:01,840 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_0/usercache/jenkins/appcache/application_1733268377251_0009/container_1733268377251_0009_01_000005/launch_container.sh] 2024-12-03T23:31:01,840 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_0/usercache/jenkins/appcache/application_1733268377251_0009/container_1733268377251_0009_01_000005/container_tokens] 2024-12-03T23:31:01,841 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_0/usercache/jenkins/appcache/application_1733268377251_0009/container_1733268377251_0009_01_000005/sysfs] 2024-12-03T23:31:02,176 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_3/usercache/jenkins/appcache/application_1733268377251_0009/container_1733268377251_0009_01_000004/launch_container.sh] 2024-12-03T23:31:02,176 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_3/usercache/jenkins/appcache/application_1733268377251_0009/container_1733268377251_0009_01_000004/container_tokens] 2024-12-03T23:31:02,177 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_3/usercache/jenkins/appcache/application_1733268377251_0009/container_1733268377251_0009_01_000004/sysfs] 2024-12-03T23:31:02,264 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth 
successful for appattempt_1733268377251_0009_000001 (auth:SIMPLE) from 127.0.0.1:40814 2024-12-03T23:31:02,312 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_1/usercache/jenkins/appcache/application_1733268377251_0009/container_1733268377251_0009_01_000002/launch_container.sh] 2024-12-03T23:31:02,312 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_1/usercache/jenkins/appcache/application_1733268377251_0009/container_1733268377251_0009_01_000002/container_tokens] 2024-12-03T23:31:02,312 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_1/usercache/jenkins/appcache/application_1733268377251_0009/container_1733268377251_0009_01_000002/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e20241203656047715ed7412186c94036ba514ccc_5cb7960ee15b899fc682c05cfcfa76be and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/local-export-1733268645799/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e20241203656047715ed7412186c94036ba514ccc_5cb7960ee15b899fc682c05cfcfa76be. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) Error: java.io.IOException: Checksum mismatch between hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithChecksum/5cb7960ee15b899fc682c05cfcfa76be/cf/5eca9104720d4e358ba2e42844ff7f8b and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/local-export-1733268645799/archive/data/default/testtb-testExportWithChecksum/5cb7960ee15b899fc682c05cfcfa76be/cf/5eca9104720d4e358ba2e42844ff7f8b. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) Error: java.io.IOException: Checksum mismatch between hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithChecksum/a2ce8b5750bad6679883aabcdedd0e0e/cf/a22020723917458497acef23688b0740 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/local-export-1733268645799/archive/data/default/testtb-testExportWithChecksum/a2ce8b5750bad6679883aabcdedd0e0e/cf/a22020723917458497acef23688b0740. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. 
You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-03T23:31:03,262 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0009_000001 (auth:SIMPLE) from 127.0.0.1:58510 2024-12-03T23:31:03,271 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0009_000001 (auth:SIMPLE) from 127.0.0.1:40820 2024-12-03T23:31:04,254 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0009_000001 (auth:SIMPLE) from 127.0.0.1:40830 2024-12-03T23:31:04,464 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region c6477990e979586f139ab47f9dab2f20, had cached 0 bytes from a total of 5890 2024-12-03T23:31:04,464 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region b50632fd0085469e770e68c6467cbdf0, had cached 0 bytes from a total of 14661 2024-12-03T23:31:05,782 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733268377251_0009_01_000011 while processing FINISH_CONTAINERS event 2024-12-03T23:31:06,035 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_3/usercache/jenkins/appcache/application_1733268377251_0009/container_1733268377251_0009_01_000003/launch_container.sh] 2024-12-03T23:31:06,035 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_3/usercache/jenkins/appcache/application_1733268377251_0009/container_1733268377251_0009_01_000003/container_tokens] 2024-12-03T23:31:06,035 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_3/usercache/jenkins/appcache/application_1733268377251_0009/container_1733268377251_0009_01_000003/sysfs] 2024-12-03T23:31:06,295 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733268377251_0009_01_000012 while processing FINISH_CONTAINERS event 2024-12-03T23:31:07,998 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_492864253_1 at /127.0.0.1:33480 [Receiving block BP-1874256318-172.17.0.2-1733268364664:blk_1073742369_1545] {}] datanode.BlockReceiver(767): Slow BlockReceiver write data to disk cost: 350ms (threshold=300ms), volume=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data1/, blockId=1073742369, seqno=4 2024-12-03T23:31:07,998 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_492864253_1 at /127.0.0.1:44762 [Receiving block BP-1874256318-172.17.0.2-1733268364664:blk_1073742369_1545] {}] datanode.BlockReceiver(767): Slow BlockReceiver write data to disk cost: 350ms (threshold=300ms), volume=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data3/, blockId=1073742369, seqno=4 2024-12-03T23:31:07,998 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_492864253_1 at /127.0.0.1:50420 [Receiving block BP-1874256318-172.17.0.2-1733268364664:blk_1073742369_1545] {}] datanode.BlockReceiver(767): Slow BlockReceiver write data to disk cost: 350ms (threshold=300ms), volume=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data5/, blockId=1073742369, seqno=4 2024-12-03T23:31:08,449 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. Error: java.io.IOException: Checksum mismatch between hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241203e8d8894ff75b4eedb0440aa05441b8f0_a2ce8b5750bad6679883aabcdedd0e0e and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/local-export-1733268645799/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241203e8d8894ff75b4eedb0440aa05441b8f0_a2ce8b5750bad6679883aabcdedd0e0e. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-03T23:31:08,617 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_0/usercache/jenkins/appcache/application_1733268377251_0009/container_1733268377251_0009_01_000007/launch_container.sh] 2024-12-03T23:31:08,617 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_0/usercache/jenkins/appcache/application_1733268377251_0009/container_1733268377251_0009_01_000007/container_tokens] 2024-12-03T23:31:08,617 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_0/usercache/jenkins/appcache/application_1733268377251_0009/container_1733268377251_0009_01_000007/sysfs] 2024-12-03T23:31:09,274 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0009_000001 (auth:SIMPLE) from 127.0.0.1:40844 2024-12-03T23:31:10,250 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_1/usercache/jenkins/appcache/application_1733268377251_0009/container_1733268377251_0009_01_000009/launch_container.sh] 2024-12-03T23:31:10,250 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_1/usercache/jenkins/appcache/application_1733268377251_0009/container_1733268377251_0009_01_000009/container_tokens] 2024-12-03T23:31:10,251 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_1/usercache/jenkins/appcache/application_1733268377251_0009/container_1733268377251_0009_01_000009/sysfs] 2024-12-03T23:31:10,404 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_1/usercache/jenkins/appcache/application_1733268377251_0009/container_1733268377251_0009_01_000010/launch_container.sh] 2024-12-03T23:31:10,404 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_1/usercache/jenkins/appcache/application_1733268377251_0009/container_1733268377251_0009_01_000010/container_tokens] 2024-12-03T23:31:10,404 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_1/usercache/jenkins/appcache/application_1733268377251_0009/container_1733268377251_0009_01_000010/sysfs] 2024-12-03T23:31:10,485 INFO [LruBlockCacheStatsExecutor {}] hfile.LruBlockCache(1020): totalSize=880 MB, usedSize=662.40 KB, freeSize=879.35 MB, max=880 MB, blockCount=2, accesses=2, hits=0, hitRatio=0, cachingAccesses=2, cachingHits=0, cachingHitsRatio=0,evictions=29, evicted=0, evictedPerRun=0.0 Error: java.io.IOException: Checksum mismatch between hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e20241203656047715ed7412186c94036ba514ccc_5cb7960ee15b899fc682c05cfcfa76be and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/local-export-1733268645799/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e20241203656047715ed7412186c94036ba514ccc_5cb7960ee15b899fc682c05cfcfa76be. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) Error: java.io.IOException: Checksum mismatch between hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithChecksum/5cb7960ee15b899fc682c05cfcfa76be/cf/5eca9104720d4e358ba2e42844ff7f8b and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/local-export-1733268645799/archive/data/default/testtb-testExportWithChecksum/5cb7960ee15b899fc682c05cfcfa76be/cf/5eca9104720d4e358ba2e42844ff7f8b. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) Error: java.io.IOException: Checksum mismatch between hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithChecksum/a2ce8b5750bad6679883aabcdedd0e0e/cf/a22020723917458497acef23688b0740 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/local-export-1733268645799/archive/data/default/testtb-testExportWithChecksum/a2ce8b5750bad6679883aabcdedd0e0e/cf/a22020723917458497acef23688b0740. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. 
You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-03T23:31:10,564 INFO [LruBlockCacheStatsExecutor {}] hfile.LruBlockCache(1020): totalSize=880 MB, usedSize=919.14 KB, freeSize=879.10 MB, max=880 MB, blockCount=3, accesses=5, hits=2, hitRatio=40.00%, , cachingAccesses=5, cachingHits=2, cachingHitsRatio=40.00%, evictions=29, evicted=0, evictedPerRun=0.0 2024-12-03T23:31:10,654 INFO [LruBlockCacheStatsExecutor {}] hfile.LruBlockCache(1020): totalSize=880 MB, usedSize=660.99 KB, freeSize=879.35 MB, max=880 MB, blockCount=0, accesses=0, hits=0, hitRatio=0, cachingAccesses=0, cachingHits=0, cachingHitsRatio=0,evictions=29, evicted=0, evictedPerRun=0.0 2024-12-03T23:31:10,766 DEBUG [master/ed8014902436:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=2, created chunk count=10, reused chunk count=22, reuseRatio=68.75% 2024-12-03T23:31:10,766 DEBUG [master/ed8014902436:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-12-03T23:31:11,286 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0009_000001 (auth:SIMPLE) from 127.0.0.1:58518 2024-12-03T23:31:11,288 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0009_000001 (auth:SIMPLE) from 127.0.0.1:40848 2024-12-03T23:31:12,291 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0009_000001 (auth:SIMPLE) from 127.0.0.1:44440 2024-12-03T23:31:12,410 INFO [regionserver/ed8014902436:0.Chore.1 {}] regionserver.Replication$ReplicationStatisticsChore(208): Global stats: WAL Edits Buffer Used=0B, Limit=268435456B 2024-12-03T23:31:12,412 INFO [regionserver/ed8014902436:0.Chore.1 {}] regionserver.Replication$ReplicationStatisticsChore(208): Global stats: WAL Edits Buffer Used=0B, Limit=268435456B 2024-12-03T23:31:12,418 INFO [regionserver/ed8014902436:0.Chore.1 {}] 
regionserver.Replication$ReplicationStatisticsChore(208): Global stats: WAL Edits Buffer Used=0B, Limit=268435456B 2024-12-03T23:31:13,696 DEBUG [master/ed8014902436:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region c6477990e979586f139ab47f9dab2f20 changed from -1.0 to 0.0, refreshing cache 2024-12-03T23:31:13,696 DEBUG [master/ed8014902436:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 5cb7960ee15b899fc682c05cfcfa76be changed from -1.0 to 0.0, refreshing cache 2024-12-03T23:31:13,696 DEBUG [master/ed8014902436:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region b50632fd0085469e770e68c6467cbdf0 changed from -1.0 to 0.0, refreshing cache 2024-12-03T23:31:13,696 DEBUG [master/ed8014902436:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region a2ce8b5750bad6679883aabcdedd0e0e changed from -1.0 to 0.0, refreshing cache 2024-12-03T23:31:13,705 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(219): Skipping table testExportExpiredSnapshot because normalization is disabled in its table properties and normalization is also disabled at table level by default 2024-12-03T23:31:13,705 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(219): Skipping table testtb-testExportWithChecksum because normalization is disabled in its table properties and normalization is also disabled at table level by default 2024-12-03T23:31:13,717 DEBUG [master/ed8014902436:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-03T23:31:14,230 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733268377251_0009_01_000017 while processing FINISH_CONTAINERS event 2024-12-03T23:31:14,382 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733268377251_0009_01_000018 while processing FINISH_CONTAINERS event 2024-12-03T23:31:14,445 INFO [regionserver/ed8014902436:0.Chore.1 {}] regionserver.HRegionServer$PeriodicMemStoreFlusher(1763): MemstoreFlusherChore requesting flush of hbase:meta,,1.1588230740 because 1588230740/info has an old edit so flush to free WALs after random delay 59189 ms 2024-12-03T23:31:14,733 DEBUG [master/ed8014902436:0.Chore.1 {}] rsgroup.RSGroupBasedLoadBalancer(138): Balancing RSGroup=default 2024-12-03T23:31:14,733 INFO [master/ed8014902436:0.Chore.1 {}] rsgroup.RSGroupBasedLoadBalancer(151): Start Generate Balance plan for group: default 2024-12-03T23:31:14,733 DEBUG [master/ed8014902436:0.Chore.1 {}] balancer.BaseLoadBalancer(619): Start Generate Balance plan for cluster. 
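The StochasticLoadBalancer entries that follow report an initial weighted average imbalance of 0.2542628785789233 and, after 14400 iterations, a plan with imbalance 0.015725134319608505. Those figures are consistent with a multiplier-weighted mean of the per-cost-function imbalances printed alongside them. A minimal Java sketch of that arithmetic, using only the multipliers and imbalance values shown in the log (the weightedImbalance helper is illustrative, not an HBase API):

    public class BalancerImbalanceCheck {
        /** Multiplier-weighted mean of per-cost-function imbalances. */
        static double weightedImbalance(double[] multipliers, double[] imbalances) {
            double num = 0, den = 0;
            for (int i = 0; i < multipliers.length; i++) {
                num += multipliers[i] * imbalances[i];
                den += multipliers[i];
            }
            return num / den;
        }

        public static void main(String[] args) {
            // Multipliers as printed: RegionCountSkew, Move, ServerLocality, RackLocality,
            // TableSkew, ReadRequest, CPRequest, WriteRequest, MemStoreSize, StoreFile.
            double[] m = {500, 7, 25, 15, 35, 5, 5, 5, 5, 5};
            // Imbalances from the "Start StochasticLoadBalancer.balancer" entry below.
            double[] before = {0.2886751345948129, 0, 0, 0, 0, 1.0, 0, 1.0, 0, 0};
            // Imbalances from the "Finished computing new moving plan" entry below.
            double[] after = {0, 0.16666666666666666, 0, 0, 0, 0.8223514921478572, 0, 0.8533464809192823, 0, 0};
            System.out.println(weightedImbalance(m, before)); // ~0.2542628785789...
            System.out.println(weightedImbalance(m, after));  // ~0.0157251343196...
        }
    }

The "(not needed)" cost functions carry no printed multiplier and are left out of the sketch; with that convention the two logged imbalance values are reproduced to the shown precision.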
2024-12-03T23:31:14,734 DEBUG [master/ed8014902436:0.Chore.1 {}] balancer.BalancerClusterState(204): Hosts are {ed8014902436=0} racks are {/default-rack=0} 2024-12-03T23:31:14,735 DEBUG [master/ed8014902436:0.Chore.1 {}] balancer.BalancerClusterState(303): server 0 has 3 regions 2024-12-03T23:31:14,735 DEBUG [master/ed8014902436:0.Chore.1 {}] balancer.BalancerClusterState(303): server 1 has 1 regions 2024-12-03T23:31:14,735 DEBUG [master/ed8014902436:0.Chore.1 {}] balancer.BalancerClusterState(303): server 2 has 2 regions 2024-12-03T23:31:14,735 DEBUG [master/ed8014902436:0.Chore.1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-03T23:31:14,735 DEBUG [master/ed8014902436:0.Chore.1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-03T23:31:14,735 DEBUG [master/ed8014902436:0.Chore.1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-03T23:31:14,735 INFO [master/ed8014902436:0.Chore.1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-03T23:31:14,735 INFO [master/ed8014902436:0.Chore.1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-03T23:31:14,735 INFO [master/ed8014902436:0.Chore.1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-03T23:31:14,735 DEBUG [master/ed8014902436:0.Chore.1 {}] balancer.BalancerClusterState(326): Number of tables=4, number of hosts=1, number of racks=1 2024-12-03T23:31:14,738 INFO [master/ed8014902436:0.Chore.1 {}] balancer.StochasticLoadBalancer(403): Cluster wide - Calculating plan. may take up to 30000ms to complete. 2024-12-03T23:31:14,743 INFO [master/ed8014902436:0.Chore.1 {}] balancer.StochasticLoadBalancer(515): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.2542628785789233, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.2886751345948129, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); ServerLocalityCostFunction : (multiplier=25.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=1.0, need balance); CPRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=1.0, need balance); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=14400 2024-12-03T23:31:15,060 INFO [master/ed8014902436:0.Chore.1 {}] balancer.StochasticLoadBalancer(562): Finished computing new moving plan. Computation took 325 ms to try 14400 different iterations. Found a solution that moves 1 regions; Going from a computed imbalance of 0.2542628785789233 to a new imbalance of 0.015725134319608505. 
funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.16666666666666666, need balance); ServerLocalityCostFunction : (multiplier=25.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.8223514921478572, need balance); CPRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.8533464809192823, need balance); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-12-03T23:31:15,065 INFO [master/ed8014902436:0.Chore.1 {}] master.HMaster(2167): Balancer plans size is 1, the balance interval is 300000 ms, and the max number regions in transition is 6 2024-12-03T23:31:15,065 INFO [master/ed8014902436:0.Chore.1 {}] master.HMaster(2172): balance hri=94ece278d222af9bae70064026dc24ae, source=ed8014902436,37351,1733268370600, destination=ed8014902436,39085,1733268370401 2024-12-03T23:31:15,075 DEBUG [master/ed8014902436:0.Chore.1 {}] procedure2.ProcedureExecutor(1139): Stored pid=224, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=94ece278d222af9bae70064026dc24ae, REOPEN/MOVE 2024-12-03T23:31:15,076 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=224, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=94ece278d222af9bae70064026dc24ae, REOPEN/MOVE 2024-12-03T23:31:15,078 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=224 updating hbase:meta row=94ece278d222af9bae70064026dc24ae, regionState=CLOSING, regionLocation=ed8014902436,37351,1733268370600 2024-12-03T23:31:15,086 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=224, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=hbase:acl, region=94ece278d222af9bae70064026dc24ae, REOPEN/MOVE because future has completed 2024-12-03T23:31:15,087 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-03T23:31:15,087 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=225, ppid=224, state=RUNNABLE, hasLock=false; CloseRegionProcedure 94ece278d222af9bae70064026dc24ae, server=ed8014902436,37351,1733268370600}] 2024-12-03T23:31:15,241 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] handler.UnassignRegionHandler(122): Close 94ece278d222af9bae70064026dc24ae 2024-12-03T23:31:15,241 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-03T23:31:15,241 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] regionserver.HRegion(1722): Closing 94ece278d222af9bae70064026dc24ae, disabling compactions & flushes 2024-12-03T23:31:15,241 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] regionserver.HRegion(1755): Closing region 
hbase:acl,,1733268373530.94ece278d222af9bae70064026dc24ae. 2024-12-03T23:31:15,241 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:acl,,1733268373530.94ece278d222af9bae70064026dc24ae. 2024-12-03T23:31:15,241 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] regionserver.HRegion(1843): Acquired close lock on hbase:acl,,1733268373530.94ece278d222af9bae70064026dc24ae. after waiting 0 ms 2024-12-03T23:31:15,241 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] regionserver.HRegion(1853): Updates disabled for region hbase:acl,,1733268373530.94ece278d222af9bae70064026dc24ae. 2024-12-03T23:31:15,242 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] regionserver.HRegion(2902): Flushing 94ece278d222af9bae70064026dc24ae 1/1 column families, dataSize=1.47 KB heapSize=3.49 KB 2024-12-03T23:31:15,293 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_0/usercache/jenkins/appcache/application_1733268377251_0009/container_1733268377251_0009_01_000008/launch_container.sh] 2024-12-03T23:31:15,293 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_0/usercache/jenkins/appcache/application_1733268377251_0009/container_1733268377251_0009_01_000008/container_tokens] 2024-12-03T23:31:15,293 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_0/usercache/jenkins/appcache/application_1733268377251_0009/container_1733268377251_0009_01_000008/sysfs] 2024-12-03T23:31:15,303 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/hbase/acl/94ece278d222af9bae70064026dc24ae/.tmp/l/f46f17fabe794cf6877057e55041e64e is 74, key is testtb-testExportFileSystemStateWithMergeRegion-1/l:/1733268616965/DeleteFamily/seqid=0 2024-12-03T23:31:15,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742370_1546 (size=5791) 2024-12-03T23:31:15,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742370_1546 (size=5791) 2024-12-03T23:31:15,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742370_1546 (size=5791) 2024-12-03T23:31:15,415 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.47 KB at sequenceid=28 (bloomFilter=false), 
to=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/hbase/acl/94ece278d222af9bae70064026dc24ae/.tmp/l/f46f17fabe794cf6877057e55041e64e 2024-12-03T23:31:15,469 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for f46f17fabe794cf6877057e55041e64e 2024-12-03T23:31:15,471 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/hbase/acl/94ece278d222af9bae70064026dc24ae/.tmp/l/f46f17fabe794cf6877057e55041e64e as hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/hbase/acl/94ece278d222af9bae70064026dc24ae/l/f46f17fabe794cf6877057e55041e64e 2024-12-03T23:31:15,480 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for f46f17fabe794cf6877057e55041e64e 2024-12-03T23:31:15,480 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/hbase/acl/94ece278d222af9bae70064026dc24ae/l/f46f17fabe794cf6877057e55041e64e, entries=13, sequenceid=28, filesize=5.7 K 2024-12-03T23:31:15,482 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] regionserver.HRegion(3140): Finished flush of dataSize ~1.47 KB/1504, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 94ece278d222af9bae70064026dc24ae in 241ms, sequenceid=28, compaction requested=false 2024-12-03T23:31:15,500 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/hbase/acl/94ece278d222af9bae70064026dc24ae/recovered.edits/31.seqid, newMaxSeqId=31, maxSeqId=1 2024-12-03T23:31:15,500 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-03T23:31:15,500 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] regionserver.HRegion(1973): Closed hbase:acl,,1733268373530.94ece278d222af9bae70064026dc24ae. 2024-12-03T23:31:15,501 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] regionserver.HRegion(1676): Region close journal for 94ece278d222af9bae70064026dc24ae: Waiting for close lock at 1733268675241Running coprocessor pre-close hooks at 1733268675241Disabling compacts and flushes for region at 1733268675241Disabling writes for close at 1733268675241Obtaining lock to block concurrent updates at 1733268675242 (+1 ms)Preparing flush snapshotting stores in 94ece278d222af9bae70064026dc24ae at 1733268675242Finished memstore snapshotting hbase:acl,,1733268373530.94ece278d222af9bae70064026dc24ae., syncing WAL and waiting on mvcc, flushsize=dataSize=1504, getHeapSize=3560, getOffHeapSize=0, getCellsCount=24 at 1733268675242Flushing stores of hbase:acl,,1733268373530.94ece278d222af9bae70064026dc24ae. 
at 1733268675251 (+9 ms)Flushing 94ece278d222af9bae70064026dc24ae/l: creating writer at 1733268675251Flushing 94ece278d222af9bae70064026dc24ae/l: appending metadata at 1733268675303 (+52 ms)Flushing 94ece278d222af9bae70064026dc24ae/l: closing flushed file at 1733268675303Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@575c85f2: reopening flushed file at 1733268675469 (+166 ms)Finished flush of dataSize ~1.47 KB/1504, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 94ece278d222af9bae70064026dc24ae in 241ms, sequenceid=28, compaction requested=false at 1733268675482 (+13 ms)Writing region close event to WAL at 1733268675495 (+13 ms)Running coprocessor post-close hooks at 1733268675500 (+5 ms)Closed at 1733268675500 2024-12-03T23:31:15,501 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] regionserver.HRegionServer(3302): Adding 94ece278d222af9bae70064026dc24ae move to ed8014902436,39085,1733268370401 record at close sequenceid=28 2024-12-03T23:31:15,503 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] handler.UnassignRegionHandler(157): Closed 94ece278d222af9bae70064026dc24ae 2024-12-03T23:31:15,504 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=224 updating hbase:meta row=94ece278d222af9bae70064026dc24ae, regionState=CLOSED 2024-12-03T23:31:15,506 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=225, ppid=224, state=RUNNABLE, hasLock=false; CloseRegionProcedure 94ece278d222af9bae70064026dc24ae, server=ed8014902436,37351,1733268370600 because future has completed 2024-12-03T23:31:15,510 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=225, resume processing ppid=224 2024-12-03T23:31:15,510 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=225, ppid=224, state=SUCCESS, hasLock=false; CloseRegionProcedure 94ece278d222af9bae70064026dc24ae, server=ed8014902436,37351,1733268370600 in 420 msec 2024-12-03T23:31:15,512 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=224, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:acl, region=94ece278d222af9bae70064026dc24ae, REOPEN/MOVE; state=CLOSED, location=ed8014902436,39085,1733268370401; forceNewPlan=false, retain=false Error: java.io.IOException: Checksum mismatch between hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241203e8d8894ff75b4eedb0440aa05441b8f0_a2ce8b5750bad6679883aabcdedd0e0e and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/local-export-1733268645799/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241203e8d8894ff75b4eedb0440aa05441b8f0_a2ce8b5750bad6679883aabcdedd0e0e. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. 
(NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-03T23:31:15,663 INFO [ed8014902436:43753 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-03T23:31:15,663 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=224 updating hbase:meta row=94ece278d222af9bae70064026dc24ae, regionState=OPENING, regionLocation=ed8014902436,39085,1733268370401 2024-12-03T23:31:15,666 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=224, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:acl, region=94ece278d222af9bae70064026dc24ae, REOPEN/MOVE because future has completed 2024-12-03T23:31:15,666 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=226, ppid=224, state=RUNNABLE, hasLock=false; OpenRegionProcedure 94ece278d222af9bae70064026dc24ae, server=ed8014902436,39085,1733268370401}] 2024-12-03T23:31:15,869 INFO [RS_OPEN_PRIORITY_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=226}] handler.AssignRegionHandler(132): Open hbase:acl,,1733268373530.94ece278d222af9bae70064026dc24ae. 2024-12-03T23:31:15,870 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=226}] regionserver.HRegion(7752): Opening region: {ENCODED => 94ece278d222af9bae70064026dc24ae, NAME => 'hbase:acl,,1733268373530.94ece278d222af9bae70064026dc24ae.', STARTKEY => '', ENDKEY => ''} 2024-12-03T23:31:15,870 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=226}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:acl,,1733268373530.94ece278d222af9bae70064026dc24ae. service=AccessControlService 2024-12-03T23:31:15,870 INFO [RS_OPEN_PRIORITY_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=226}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
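The pid=224/225/226 procedures around this point move the hbase:acl region 94ece278d222af9bae70064026dc24ae from ed8014902436,37351,1733268370600 to ed8014902436,39085,1733268370401: close and flush on the source server, update hbase:meta, then open on the destination. Here the balancer initiated the move, but the same CLOSE/OPEN transition can be requested through the client Admin API; a minimal sketch assuming a reachable cluster and reusing the region and destination names from this log (connection setup is a placeholder):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MoveAclRegion {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml on the classpath
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Encoded region name and destination server as reported by pid=224 in the log.
                byte[] encodedRegion = Bytes.toBytes("94ece278d222af9bae70064026dc24ae");
                ServerName dest = ServerName.valueOf("ed8014902436,39085,1733268370401");
                admin.move(encodedRegion, dest); // triggers the same close-then-reopen transition
            }
        }
    }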
2024-12-03T23:31:15,871 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=226}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table acl 94ece278d222af9bae70064026dc24ae 2024-12-03T23:31:15,871 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=226}] regionserver.HRegion(898): Instantiated hbase:acl,,1733268373530.94ece278d222af9bae70064026dc24ae.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T23:31:15,871 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=226}] regionserver.HRegion(7794): checking encryption for 94ece278d222af9bae70064026dc24ae 2024-12-03T23:31:15,871 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=226}] regionserver.HRegion(7797): checking classloading for 94ece278d222af9bae70064026dc24ae 2024-12-03T23:31:15,873 INFO [StoreOpener-94ece278d222af9bae70064026dc24ae-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family l of region 94ece278d222af9bae70064026dc24ae 2024-12-03T23:31:15,874 INFO [StoreOpener-94ece278d222af9bae70064026dc24ae-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 94ece278d222af9bae70064026dc24ae columnFamilyName l 2024-12-03T23:31:15,874 DEBUG [StoreOpener-94ece278d222af9bae70064026dc24ae-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:31:15,912 INFO [StoreFileOpener-94ece278d222af9bae70064026dc24ae-l-1 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for f46f17fabe794cf6877057e55041e64e 2024-12-03T23:31:15,912 DEBUG [StoreOpener-94ece278d222af9bae70064026dc24ae-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/hbase/acl/94ece278d222af9bae70064026dc24ae/l/f46f17fabe794cf6877057e55041e64e 2024-12-03T23:31:15,912 INFO [StoreOpener-94ece278d222af9bae70064026dc24ae-1 {}] regionserver.HStore(327): Store=94ece278d222af9bae70064026dc24ae/l, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T23:31:15,912 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=226}] regionserver.HRegion(1038): replaying wal for 94ece278d222af9bae70064026dc24ae 2024-12-03T23:31:15,913 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/ed8014902436:0-0 
{event_type=M_RS_OPEN_PRIORITY_REGION, pid=226}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/hbase/acl/94ece278d222af9bae70064026dc24ae 2024-12-03T23:31:15,914 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=226}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/hbase/acl/94ece278d222af9bae70064026dc24ae 2024-12-03T23:31:15,914 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=226}] regionserver.HRegion(1048): stopping wal replay for 94ece278d222af9bae70064026dc24ae 2024-12-03T23:31:15,914 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=226}] regionserver.HRegion(1060): Cleaning up temporary data for 94ece278d222af9bae70064026dc24ae 2024-12-03T23:31:15,917 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=226}] regionserver.HRegion(1093): writing seq id for 94ece278d222af9bae70064026dc24ae 2024-12-03T23:31:15,918 INFO [RS_OPEN_PRIORITY_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=226}] regionserver.HRegion(1114): Opened 94ece278d222af9bae70064026dc24ae; next sequenceid=32; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60878891, jitterRate=-0.09283383190631866}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-03T23:31:15,918 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=226}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 94ece278d222af9bae70064026dc24ae 2024-12-03T23:31:15,922 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=226}] access.PermissionStorage(613): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-03T23:31:15,923 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=226}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-12-03T23:31:15,958 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportExpiredSnapshot 2024-12-03T23:31:15,958 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportExpiredSnapshot 2024-12-03T23:31:15,958 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportExpiredSnapshot 2024-12-03T23:31:15,958 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportExpiredSnapshot 2024-12-03T23:31:15,964 DEBUG 
[zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-03T23:31:15,963 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-03T23:31:15,974 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-03T23:31:15,975 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-03T23:31:15,982 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-03T23:31:15,982 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-03T23:31:15,982 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-03T23:31:15,982 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-03T23:31:15,983 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=226}] regionserver.HRegion(1006): Region open journal for 94ece278d222af9bae70064026dc24ae: Running coprocessor pre-open hook at 1733268675871Writing region info on filesystem at 1733268675871Initializing all the Stores at 1733268675872 (+1 ms)Instantiating store for column family {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733268675872Cleaning up temporary data from old regions at 1733268675914 (+42 ms)Running coprocessor post-open hooks at 1733268675918 (+4 ms)Region opened successfully at 1733268675983 (+65 ms) 2024-12-03T23:31:15,984 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-03T23:31:15,984 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating 
permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-03T23:31:15,984 INFO [RS_OPEN_PRIORITY_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=226}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:acl,,1733268373530.94ece278d222af9bae70064026dc24ae., pid=226, masterSystemTime=1733268675821 2024-12-03T23:31:15,984 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-03T23:31:15,985 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-03T23:31:15,986 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=226}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:acl,,1733268373530.94ece278d222af9bae70064026dc24ae. 2024-12-03T23:31:15,987 INFO [RS_OPEN_PRIORITY_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=226}] handler.AssignRegionHandler(153): Opened hbase:acl,,1733268373530.94ece278d222af9bae70064026dc24ae. 2024-12-03T23:31:15,988 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=224 updating hbase:meta row=94ece278d222af9bae70064026dc24ae, regionState=OPEN, openSeqNum=32, regionLocation=ed8014902436,39085,1733268370401 2024-12-03T23:31:15,991 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=226, ppid=224, state=RUNNABLE, hasLock=false; OpenRegionProcedure 94ece278d222af9bae70064026dc24ae, server=ed8014902436,39085,1733268370401 because future has completed 2024-12-03T23:31:15,997 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=226, resume processing ppid=224 2024-12-03T23:31:15,997 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=226, ppid=224, state=SUCCESS, hasLock=false; OpenRegionProcedure 94ece278d222af9bae70064026dc24ae, server=ed8014902436,39085,1733268370401 in 326 msec 2024-12-03T23:31:16,000 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=224, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=94ece278d222af9bae70064026dc24ae, REOPEN/MOVE in 932 msec 2024-12-03T23:31:16,079 DEBUG [master/ed8014902436:0.Chore.1 {}] master.HMaster(2203): Balancer is going into sleep until next period in 300000ms 2024-12-03T23:31:16,300 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0009_000001 (auth:SIMPLE) from 127.0.0.1:44452 2024-12-03T23:31:18,257 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_2/usercache/jenkins/appcache/application_1733268377251_0009/container_1733268377251_0009_01_000015/launch_container.sh] 2024-12-03T23:31:18,257 WARN [ContainersLauncher #4 {}] 
nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_2/usercache/jenkins/appcache/application_1733268377251_0009/container_1733268377251_0009_01_000015/container_tokens] 2024-12-03T23:31:18,257 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_2/usercache/jenkins/appcache/application_1733268377251_0009/container_1733268377251_0009_01_000015/sysfs] 2024-12-03T23:31:18,331 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_2/usercache/jenkins/appcache/application_1733268377251_0009/container_1733268377251_0009_01_000014/launch_container.sh] 2024-12-03T23:31:18,331 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_2/usercache/jenkins/appcache/application_1733268377251_0009/container_1733268377251_0009_01_000014/container_tokens] 2024-12-03T23:31:18,331 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_2/usercache/jenkins/appcache/application_1733268377251_0009/container_1733268377251_0009_01_000014/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithChecksum/5cb7960ee15b899fc682c05cfcfa76be/cf/5eca9104720d4e358ba2e42844ff7f8b and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/local-export-1733268645799/archive/data/default/testtb-testExportWithChecksum/5cb7960ee15b899fc682c05cfcfa76be/cf/5eca9104720d4e358ba2e42844ff7f8b. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) Error: java.io.IOException: Checksum mismatch between hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e20241203656047715ed7412186c94036ba514ccc_5cb7960ee15b899fc682c05cfcfa76be and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/local-export-1733268645799/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e20241203656047715ed7412186c94036ba514ccc_5cb7960ee15b899fc682c05cfcfa76be. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-03T23:31:18,708 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-03T23:31:19,146 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_1/usercache/jenkins/appcache/application_1733268377251_0009/container_1733268377251_0009_01_000016/launch_container.sh] 2024-12-03T23:31:19,146 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_1/usercache/jenkins/appcache/application_1733268377251_0009/container_1733268377251_0009_01_000016/container_tokens] 2024-12-03T23:31:19,146 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_1/usercache/jenkins/appcache/application_1733268377251_0009/container_1733268377251_0009_01_000016/sysfs] 2024-12-03T23:31:19,320 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0009_000001 (auth:SIMPLE) from 127.0.0.1:44462 2024-12-03T23:31:19,322 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0009_000001 (auth:SIMPLE) from 127.0.0.1:44476 Error: java.io.IOException: Checksum mismatch between hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithChecksum/a2ce8b5750bad6679883aabcdedd0e0e/cf/a22020723917458497acef23688b0740 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/local-export-1733268645799/archive/data/default/testtb-testExportWithChecksum/a2ce8b5750bad6679883aabcdedd0e0e/cf/a22020723917458497acef23688b0740. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. 
Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-03T23:31:19,902 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:acl' 2024-12-03T23:31:20,207 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_0/usercache/jenkins/appcache/application_1733268377251_0009/container_1733268377251_0009_01_000013/launch_container.sh] 2024-12-03T23:31:20,208 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_0/usercache/jenkins/appcache/application_1733268377251_0009/container_1733268377251_0009_01_000013/container_tokens] 2024-12-03T23:31:20,208 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_0/usercache/jenkins/appcache/application_1733268377251_0009/container_1733268377251_0009_01_000013/sysfs] 2024-12-03T23:31:20,328 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0009_000001 (auth:SIMPLE) from 127.0.0.1:44486 2024-12-03T23:31:22,219 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733268377251_0009_01_000023 while processing FINISH_CONTAINERS event 2024-12-03T23:31:23,220 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733268377251_0009_01_000024 while processing FINISH_CONTAINERS event 2024-12-03T23:31:23,333 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0009_000001 (auth:SIMPLE) from 127.0.0.1:51980 2024-12-03T23:31:23,334 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0009_000001 (auth:SIMPLE) from 127.0.0.1:51982 
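The recurring "Checksum mismatch" errors in this export come from comparing a file-level checksum on the hdfs:// source against one on the file: destination: the two FileSystem implementations may expose different checksum algorithms, or none at all, so the comparison can fail even when the copied bytes are identical, which is what the error text is pointing at with COMPOSITE_CRC and -no-checksum-verify. A rough sketch of that kind of cross-filesystem comparison with the Hadoop FileSystem API; the paths are placeholders and this is not the ExportSnapshot implementation itself:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileChecksum;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ChecksumCompareSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            // COMPOSITE_CRC asks HDFS for a block-size-independent file checksum, the
            // option the error message suggests for mixed block sizes or filesystems.
            conf.set("dfs.checksum.combine.mode", "COMPOSITE_CRC");

            Path src = new Path("hdfs://localhost:37331/user/jenkins/some/hfile"); // placeholder
            Path dst = new Path("file:///tmp/local-export/some/hfile");            // placeholder

            FileChecksum a = src.getFileSystem(conf).getFileChecksum(src);
            FileChecksum b = dst.getFileSystem(conf).getFileChecksum(dst);

            // Either side may return null (no checksum support), and checksums produced by
            // different algorithms are not comparable -- the situation the log complains about.
            if (a == null || b == null || !a.getAlgorithmName().equals(b.getAlgorithmName())) {
                System.out.println("checksums not comparable across these filesystems");
            } else {
                System.out.println("match: " + a.equals(b));
            }
        }
    }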
2024-12-03T23:31:23,341 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0009_000001 (auth:SIMPLE) from 127.0.0.1:51978 2024-12-03T23:31:23,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742369_1545 (size=49083) 2024-12-03T23:31:23,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742369_1545 (size=49083) 2024-12-03T23:31:23,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742369_1545 (size=49083) 2024-12-03T23:31:23,496 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1733268377251_0009_01_000021 is : 143 2024-12-03T23:31:23,539 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_3/usercache/jenkins/appcache/application_1733268377251_0009/container_1733268377251_0009_01_000021/launch_container.sh] 2024-12-03T23:31:23,539 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_3/usercache/jenkins/appcache/application_1733268377251_0009/container_1733268377251_0009_01_000021/container_tokens] 2024-12-03T23:31:23,539 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_3/usercache/jenkins/appcache/application_1733268377251_0009/container_1733268377251_0009_01_000021/sysfs] 2024-12-03T23:31:23,541 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1733268377251_0009_01_000020 is : 143 2024-12-03T23:31:23,547 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1733268377251_0009_01_000022 is : 143 2024-12-03T23:31:23,566 WARN [ContainersLauncher #6 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_2/usercache/jenkins/appcache/application_1733268377251_0009/container_1733268377251_0009_01_000020/launch_container.sh] 2024-12-03T23:31:23,566 WARN [ContainersLauncher #6 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_2/usercache/jenkins/appcache/application_1733268377251_0009/container_1733268377251_0009_01_000020/container_tokens] 2024-12-03T23:31:23,566 WARN [ContainersLauncher #6 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_2/usercache/jenkins/appcache/application_1733268377251_0009/container_1733268377251_0009_01_000020/sysfs] 2024-12-03T23:31:23,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742371_1547 (size=460) 2024-12-03T23:31:23,572 WARN [ContainersLauncher #7 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_0/usercache/jenkins/appcache/application_1733268377251_0009/container_1733268377251_0009_01_000022/launch_container.sh] 2024-12-03T23:31:23,573 WARN [ContainersLauncher #7 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_0/usercache/jenkins/appcache/application_1733268377251_0009/container_1733268377251_0009_01_000022/container_tokens] 2024-12-03T23:31:23,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742371_1547 (size=460) 2024-12-03T23:31:23,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742371_1547 (size=460) 2024-12-03T23:31:23,573 WARN [ContainersLauncher #7 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_0/usercache/jenkins/appcache/application_1733268377251_0009/container_1733268377251_0009_01_000022/sysfs] 2024-12-03T23:31:23,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742372_1548 (size=49083) 2024-12-03T23:31:23,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742372_1548 (size=49083) 2024-12-03T23:31:23,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742372_1548 (size=49083) 2024-12-03T23:31:23,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742373_1549 (size=349740) 2024-12-03T23:31:23,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742373_1549 (size=349740) 2024-12-03T23:31:23,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742373_1549 (size=349740) 2024-12-03T23:31:23,636 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0009_000001 (auth:SIMPLE) from 127.0.0.1:51998 2024-12-03T23:31:25,640 ERROR [Time-limited test {}] snapshot.ExportSnapshot(1239): Snapshot export failed org.apache.hadoop.hbase.snapshot.ExportSnapshotException: Task failed task_1733268377251_0009_m_000001 Job failed as tasks failed. 
failedMaps:1 failedReduces:0 killedMaps:0 killedReduces: 0 at org.apache.hadoop.hbase.snapshot.ExportSnapshot.runCopyJob(ExportSnapshot.java:947) ~[classes/:?] at org.apache.hadoop.hbase.snapshot.ExportSnapshot.doWork(ExportSnapshot.java:1216) ~[classes/:?] at org.apache.hadoop.hbase.util.AbstractHBaseTool.run(AbstractHBaseTool.java:150) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:82) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.runExportSnapshot(TestExportSnapshot.java:570) ~[test-classes/:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportFileSystemState(TestExportSnapshot.java:400) ~[test-classes/:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportWithChecksum(TestExportSnapshot.java:285) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
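The checksum mismatch above is what the first export attempt died on, and the error text itself suggests re-running with -no-checksum-verify. Below is a minimal sketch of such a re-run, assuming the standard ExportSnapshot options -snapshot and -copy-to and using a placeholder target URI; only -no-checksum-verify is taken verbatim from the message above, and, as that message warns, skipping verification can mask corruption introduced during the copy. ToolRunner is the same entry point the test uses, as the stack trace above shows.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportWithoutChecksumVerify {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Re-run the export without per-file checksum comparison. The snapshot name is the
    // one seen in this log; the -copy-to URI is a placeholder, not the test's actual path.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportWithChecksum",
        "-copy-to", "hdfs://localhost:37331/user/jenkins/export-target",
        "-no-checksum-verify"
    });
    System.exit(rc);
  }
}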
2024-12-03T23:31:25,641 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268685641 2024-12-03T23:31:25,641 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:37331, tgtDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268685641, rawTgtDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268685641, srcFsUri=hdfs://localhost:37331, srcDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b 2024-12-03T23:31:25,666 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:37331, inputRoot=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b 2024-12-03T23:31:25,666 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-919726820_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268685641, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268685641/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-03T23:31:25,668 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-03T23:31:25,673 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/snaptb0-testExportWithChecksum to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268685641/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-03T23:31:25,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742375_1551 (size=156) 2024-12-03T23:31:25,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742375_1551 (size=156) 2024-12-03T23:31:25,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742375_1551 (size=156) 2024-12-03T23:31:25,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742374_1550 (size=951) 2024-12-03T23:31:25,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742374_1550 (size=951) 2024-12-03T23:31:25,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742374_1550 (size=951) 2024-12-03T23:31:25,713 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:31:25,713 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:31:25,714 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:31:26,600 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/hadoop-13803482831596568705.jar 2024-12-03T23:31:26,600 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:31:26,600 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:31:26,654 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/hadoop-11733367105953270082.jar 2024-12-03T23:31:26,654 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:31:26,654 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:31:26,654 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:31:26,655 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:31:26,655 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:31:26,655 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:31:26,655 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-03T23:31:26,655 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-03T23:31:26,655 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-03T23:31:26,656 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-03T23:31:26,656 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-03T23:31:26,656 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-03T23:31:26,656 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-03T23:31:26,656 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-03T23:31:26,656 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-03T23:31:26,657 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-03T23:31:26,657 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-03T23:31:26,657 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 
2024-12-03T23:31:26,657 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-03T23:31:26,657 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-03T23:31:26,657 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-03T23:31:26,658 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-03T23:31:26,658 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-03T23:31:26,658 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-03T23:31:26,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742376_1552 (size=131440) 2024-12-03T23:31:26,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742376_1552 (size=131440) 2024-12-03T23:31:26,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742376_1552 (size=131440) 2024-12-03T23:31:26,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742377_1553 (size=4188619) 2024-12-03T23:31:26,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742377_1553 (size=4188619) 2024-12-03T23:31:26,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742377_1553 (size=4188619) 2024-12-03T23:31:26,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742378_1554 (size=1323991) 2024-12-03T23:31:26,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742378_1554 (size=1323991) 2024-12-03T23:31:26,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742378_1554 (size=1323991) 2024-12-03T23:31:26,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:39281 is added to blk_1073742379_1555 (size=903924) 2024-12-03T23:31:26,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742379_1555 (size=903924) 2024-12-03T23:31:26,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742379_1555 (size=903924) 2024-12-03T23:31:26,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742380_1556 (size=8360360) 2024-12-03T23:31:26,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742380_1556 (size=8360360) 2024-12-03T23:31:26,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742380_1556 (size=8360360) 2024-12-03T23:31:26,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742381_1557 (size=1877034) 2024-12-03T23:31:26,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742381_1557 (size=1877034) 2024-12-03T23:31:26,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742381_1557 (size=1877034) 2024-12-03T23:31:26,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742382_1558 (size=77835) 2024-12-03T23:31:26,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742382_1558 (size=77835) 2024-12-03T23:31:26,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742382_1558 (size=77835) 2024-12-03T23:31:26,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742383_1559 (size=30949) 2024-12-03T23:31:26,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742383_1559 (size=30949) 2024-12-03T23:31:26,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742383_1559 (size=30949) 2024-12-03T23:31:26,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742384_1560 (size=1597213) 2024-12-03T23:31:26,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742384_1560 (size=1597213) 2024-12-03T23:31:26,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742384_1560 (size=1597213) 2024-12-03T23:31:26,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742385_1561 (size=4695811) 2024-12-03T23:31:26,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742385_1561 (size=4695811) 2024-12-03T23:31:26,811 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742385_1561 (size=4695811) 2024-12-03T23:31:26,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742386_1562 (size=232957) 2024-12-03T23:31:26,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742386_1562 (size=232957) 2024-12-03T23:31:26,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742386_1562 (size=232957) 2024-12-03T23:31:26,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742387_1563 (size=127628) 2024-12-03T23:31:26,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742387_1563 (size=127628) 2024-12-03T23:31:26,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742387_1563 (size=127628) 2024-12-03T23:31:26,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742388_1564 (size=20406) 2024-12-03T23:31:26,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742388_1564 (size=20406) 2024-12-03T23:31:26,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742388_1564 (size=20406) 2024-12-03T23:31:26,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742389_1565 (size=5175431) 2024-12-03T23:31:26,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742389_1565 (size=5175431) 2024-12-03T23:31:26,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742389_1565 (size=5175431) 2024-12-03T23:31:26,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742390_1566 (size=217634) 2024-12-03T23:31:26,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742390_1566 (size=217634) 2024-12-03T23:31:26,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742390_1566 (size=217634) 2024-12-03T23:31:26,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742391_1567 (size=443172) 2024-12-03T23:31:26,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742391_1567 (size=443172) 2024-12-03T23:31:26,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742391_1567 (size=443172) 2024-12-03T23:31:26,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742392_1568 (size=1832290) 2024-12-03T23:31:26,868 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742392_1568 (size=1832290) 2024-12-03T23:31:26,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742392_1568 (size=1832290) 2024-12-03T23:31:26,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742393_1569 (size=6424741) 2024-12-03T23:31:26,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742393_1569 (size=6424741) 2024-12-03T23:31:26,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742393_1569 (size=6424741) 2024-12-03T23:31:26,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742394_1570 (size=322274) 2024-12-03T23:31:26,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742394_1570 (size=322274) 2024-12-03T23:31:26,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742394_1570 (size=322274) 2024-12-03T23:31:26,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742395_1571 (size=503880) 2024-12-03T23:31:26,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742395_1571 (size=503880) 2024-12-03T23:31:26,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742395_1571 (size=503880) 2024-12-03T23:31:26,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742396_1572 (size=29229) 2024-12-03T23:31:26,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742396_1572 (size=29229) 2024-12-03T23:31:26,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742396_1572 (size=29229) 2024-12-03T23:31:26,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742397_1573 (size=24096) 2024-12-03T23:31:26,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742397_1573 (size=24096) 2024-12-03T23:31:26,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742397_1573 (size=24096) 2024-12-03T23:31:26,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742398_1574 (size=111872) 2024-12-03T23:31:26,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742398_1574 (size=111872) 2024-12-03T23:31:26,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742398_1574 (size=111872) 2024-12-03T23:31:26,929 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742399_1575 (size=45609) 2024-12-03T23:31:26,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742399_1575 (size=45609) 2024-12-03T23:31:26,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742399_1575 (size=45609) 2024-12-03T23:31:26,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742400_1576 (size=136454) 2024-12-03T23:31:26,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742400_1576 (size=136454) 2024-12-03T23:31:26,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742400_1576 (size=136454) 2024-12-03T23:31:26,937 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-03T23:31:26,939 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list 2024-12-03T23:31:26,941 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=14.1 K 2024-12-03T23:31:26,941 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=7.8 K 2024-12-03T23:31:26,941 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=6.2 K 2024-12-03T23:31:26,941 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=5.1 K 2024-12-03T23:31:26,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742401_1577 (size=1023) 2024-12-03T23:31:26,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742401_1577 (size=1023) 2024-12-03T23:31:26,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742401_1577 (size=1023) 2024-12-03T23:31:26,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742402_1578 (size=35) 2024-12-03T23:31:26,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742402_1578 (size=35) 2024-12-03T23:31:26,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742402_1578 (size=35) 2024-12-03T23:31:26,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742403_1579 (size=303996) 2024-12-03T23:31:26,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742403_1579 (size=303996) 2024-12-03T23:31:26,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742403_1579 (size=303996) 2024-12-03T23:31:28,603 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_0/usercache/jenkins/appcache/application_1733268377251_0009/container_1733268377251_0009_01_000019/launch_container.sh] 2024-12-03T23:31:28,603 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_0/usercache/jenkins/appcache/application_1733268377251_0009/container_1733268377251_0009_01_000019/container_tokens] 2024-12-03T23:31:28,603 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_0/usercache/jenkins/appcache/application_1733268377251_0009/container_1733268377251_0009_01_000019/sysfs] 2024-12-03T23:31:29,699 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-03T23:31:29,699 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-03T23:31:29,701 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0009_000001 (auth:SIMPLE) from 127.0.0.1:41496 2024-12-03T23:31:29,713 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_2/usercache/jenkins/appcache/application_1733268377251_0009/container_1733268377251_0009_01_000001/launch_container.sh] 2024-12-03T23:31:29,713 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_2/usercache/jenkins/appcache/application_1733268377251_0009/container_1733268377251_0009_01_000001/container_tokens] 2024-12-03T23:31:29,713 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_2/usercache/jenkins/appcache/application_1733268377251_0009/container_1733268377251_0009_01_000001/sysfs] 2024-12-03T23:31:29,827 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region a2ce8b5750bad6679883aabcdedd0e0e, had cached 0 bytes from a total of 14451 2024-12-03T23:31:29,827 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 5cb7960ee15b899fc682c05cfcfa76be, had cached 0 bytes from a total of 
6308 2024-12-03T23:31:30,565 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0010_000001 (auth:SIMPLE) from 127.0.0.1:52008 2024-12-03T23:31:34,936 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0010_000001 (auth:SIMPLE) from 127.0.0.1:49554 2024-12-03T23:31:35,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742404_1580 (size=349694) 2024-12-03T23:31:35,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742404_1580 (size=349694) 2024-12-03T23:31:35,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742404_1580 (size=349694) 2024-12-03T23:31:37,141 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0010_000001 (auth:SIMPLE) from 127.0.0.1:54388 2024-12-03T23:31:37,142 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0010_000001 (auth:SIMPLE) from 127.0.0.1:58630 2024-12-03T23:31:38,019 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0010_000001 (auth:SIMPLE) from 127.0.0.1:58636 2024-12-03T23:31:38,022 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0010_000001 (auth:SIMPLE) from 127.0.0.1:54398 2024-12-03T23:31:38,450 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
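The "export split=N size=..." lines logged at 23:31:26 above show the snapshot's hfiles being grouped into four roughly size-balanced splits, one per map task. The snippet below only illustrates that kind of size-balanced grouping with a greedy largest-first assignment; it is not ExportSnapshot's actual implementation, and the byte counts are simply the file sizes (14451, 8032, 6308, 5241) that appear elsewhere in this log.

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.PriorityQueue;

public class SplitBalancer {
  /** Greedily assign file sizes (largest first) to whichever group is currently lightest. */
  static List<List<Long>> balance(List<Long> sizes, int groups) {
    List<List<Long>> result = new ArrayList<>();
    // Min-heap of {runningTotal, groupIndex}, so the lightest group is always polled first.
    PriorityQueue<long[]> heap = new PriorityQueue<>(Comparator.<long[]>comparingLong(a -> a[0]));
    for (int i = 0; i < groups; i++) {
      result.add(new ArrayList<>());
      heap.add(new long[] { 0L, i });
    }
    List<Long> sorted = new ArrayList<>(sizes);
    sorted.sort(Comparator.reverseOrder());
    for (long size : sorted) {
      long[] lightest = heap.poll();
      result.get((int) lightest[1]).add(size);
      lightest[0] += size;
      heap.add(lightest);
    }
    return result;
  }

  public static void main(String[] args) {
    // Sizes in bytes, echoing the hfile sizes seen in this log; with four groups each
    // file lands in its own split, matching the four "export split" lines above.
    System.out.println(balance(List.of(14451L, 8032L, 6308L, 5241L), 4));
  }
}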
2024-12-03T23:31:40,702 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733268377251_0010_01_000006 while processing FINISH_CONTAINERS event 2024-12-03T23:31:41,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742405_1581 (size=14451) 2024-12-03T23:31:41,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742405_1581 (size=14451) 2024-12-03T23:31:41,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742405_1581 (size=14451) 2024-12-03T23:31:42,304 WARN [ContainersLauncher #7 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_0/usercache/jenkins/appcache/application_1733268377251_0010/container_1733268377251_0010_01_000002/launch_container.sh] 2024-12-03T23:31:42,305 WARN [ContainersLauncher #7 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_0/usercache/jenkins/appcache/application_1733268377251_0010/container_1733268377251_0010_01_000002/container_tokens] 2024-12-03T23:31:42,305 WARN [ContainersLauncher #7 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_0/usercache/jenkins/appcache/application_1733268377251_0010/container_1733268377251_0010_01_000002/sysfs] 2024-12-03T23:31:43,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742407_1583 (size=6308) 2024-12-03T23:31:43,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742407_1583 (size=6308) 2024-12-03T23:31:43,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742407_1583 (size=6308) 2024-12-03T23:31:43,869 WARN [ContainersLauncher #6 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_1/usercache/jenkins/appcache/application_1733268377251_0010/container_1733268377251_0010_01_000004/launch_container.sh] 2024-12-03T23:31:43,869 WARN [ContainersLauncher #6 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_1/usercache/jenkins/appcache/application_1733268377251_0010/container_1733268377251_0010_01_000004/container_tokens] 2024-12-03T23:31:43,869 WARN [ContainersLauncher #6 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_1/usercache/jenkins/appcache/application_1733268377251_0010/container_1733268377251_0010_01_000004/sysfs] 2024-12-03T23:31:43,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742408_1584 (size=8032) 2024-12-03T23:31:43,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742408_1584 (size=8032) 2024-12-03T23:31:43,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742408_1584 (size=8032) 2024-12-03T23:31:43,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742409_1585 (size=5241) 2024-12-03T23:31:43,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742409_1585 (size=5241) 2024-12-03T23:31:43,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742409_1585 (size=5241) 2024-12-03T23:31:44,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742406_1582 (size=31733) 2024-12-03T23:31:44,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742406_1582 (size=31733) 2024-12-03T23:31:44,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742406_1582 (size=31733) 2024-12-03T23:31:44,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742410_1586 (size=463) 2024-12-03T23:31:44,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742410_1586 (size=463) 2024-12-03T23:31:44,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742410_1586 (size=463) 2024-12-03T23:31:44,177 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_2/usercache/jenkins/appcache/application_1733268377251_0010/container_1733268377251_0010_01_000005/launch_container.sh] 2024-12-03T23:31:44,178 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_2/usercache/jenkins/appcache/application_1733268377251_0010/container_1733268377251_0010_01_000005/container_tokens] 2024-12-03T23:31:44,178 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_2/usercache/jenkins/appcache/application_1733268377251_0010/container_1733268377251_0010_01_000005/sysfs] 2024-12-03T23:31:44,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742411_1587 (size=31733) 2024-12-03T23:31:44,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742411_1587 (size=31733) 2024-12-03T23:31:44,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742411_1587 (size=31733) 2024-12-03T23:31:44,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742412_1588 (size=349694) 2024-12-03T23:31:44,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742412_1588 (size=349694) 2024-12-03T23:31:44,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742412_1588 (size=349694) 2024-12-03T23:31:44,215 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0010_000001 (auth:SIMPLE) from 127.0.0.1:50228 2024-12-03T23:31:44,223 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0010_000001 (auth:SIMPLE) from 127.0.0.1:50234 2024-12-03T23:31:45,443 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-03T23:31:45,444 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
2024-12-03T23:31:45,473 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testExportWithChecksum 2024-12-03T23:31:45,473 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-03T23:31:45,474 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-03T23:31:45,474 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-919726820_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/snaptb0-testExportWithChecksum at hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-12-03T23:31:45,474 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/snaptb0-testExportWithChecksum/.snapshotinfo 2024-12-03T23:31:45,475 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/snaptb0-testExportWithChecksum/data.manifest 2024-12-03T23:31:45,475 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-919726820_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268685641/.hbase-snapshot/snaptb0-testExportWithChecksum at hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268685641/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-12-03T23:31:45,475 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268685641/.hbase-snapshot/snaptb0-testExportWithChecksum/.snapshotinfo 2024-12-03T23:31:45,475 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268685641/.hbase-snapshot/snaptb0-testExportWithChecksum/data.manifest 2024-12-03T23:31:45,483 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportWithChecksum 2024-12-03T23:31:45,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] procedure2.ProcedureExecutor(1139): Stored pid=227, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithChecksum 2024-12-03T23:31:45,488 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733268705488"}]},"ts":"1733268705488"} 2024-12-03T23:31:45,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=227 2024-12-03T23:31:45,490 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=DISABLING in hbase:meta 2024-12-03T23:31:45,490 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithChecksum to state=DISABLING 2024-12-03T23:31:45,491 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): 
Initialized subprocedures=[{pid=228, ppid=227, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithChecksum}] 2024-12-03T23:31:45,494 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=229, ppid=228, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=5cb7960ee15b899fc682c05cfcfa76be, UNASSIGN}, {pid=230, ppid=228, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=a2ce8b5750bad6679883aabcdedd0e0e, UNASSIGN}] 2024-12-03T23:31:45,500 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=230, ppid=228, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=a2ce8b5750bad6679883aabcdedd0e0e, UNASSIGN 2024-12-03T23:31:45,501 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=230 updating hbase:meta row=a2ce8b5750bad6679883aabcdedd0e0e, regionState=CLOSING, regionLocation=ed8014902436,41121,1733268370525 2024-12-03T23:31:45,502 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=229, ppid=228, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=5cb7960ee15b899fc682c05cfcfa76be, UNASSIGN 2024-12-03T23:31:45,504 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=229 updating hbase:meta row=5cb7960ee15b899fc682c05cfcfa76be, regionState=CLOSING, regionLocation=ed8014902436,39085,1733268370401 2024-12-03T23:31:45,505 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=230, ppid=228, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=a2ce8b5750bad6679883aabcdedd0e0e, UNASSIGN because future has completed 2024-12-03T23:31:45,509 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=229, ppid=228, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=5cb7960ee15b899fc682c05cfcfa76be, UNASSIGN because future has completed 2024-12-03T23:31:45,512 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-03T23:31:45,512 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=231, ppid=230, state=RUNNABLE, hasLock=false; CloseRegionProcedure a2ce8b5750bad6679883aabcdedd0e0e, server=ed8014902436,41121,1733268370525}] 2024-12-03T23:31:45,523 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-03T23:31:45,523 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=232, ppid=229, state=RUNNABLE, hasLock=false; CloseRegionProcedure 5cb7960ee15b899fc682c05cfcfa76be, server=ed8014902436,39085,1733268370401}] 2024-12-03T23:31:45,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=227 2024-12-03T23:31:45,688 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] 
handler.UnassignRegionHandler(122): Close a2ce8b5750bad6679883aabcdedd0e0e 2024-12-03T23:31:45,688 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] handler.UnassignRegionHandler(122): Close 5cb7960ee15b899fc682c05cfcfa76be 2024-12-03T23:31:45,688 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-03T23:31:45,688 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-03T23:31:45,688 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] regionserver.HRegion(1722): Closing a2ce8b5750bad6679883aabcdedd0e0e, disabling compactions & flushes 2024-12-03T23:31:45,688 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] regionserver.HRegion(1722): Closing 5cb7960ee15b899fc682c05cfcfa76be, disabling compactions & flushes 2024-12-03T23:31:45,688 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,,1733268644461.5cb7960ee15b899fc682c05cfcfa76be. 2024-12-03T23:31:45,688 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,1,1733268644461.a2ce8b5750bad6679883aabcdedd0e0e. 2024-12-03T23:31:45,688 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,,1733268644461.5cb7960ee15b899fc682c05cfcfa76be. 2024-12-03T23:31:45,688 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,1,1733268644461.a2ce8b5750bad6679883aabcdedd0e0e. 2024-12-03T23:31:45,688 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,,1733268644461.5cb7960ee15b899fc682c05cfcfa76be. after waiting 0 ms 2024-12-03T23:31:45,688 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,1,1733268644461.a2ce8b5750bad6679883aabcdedd0e0e. after waiting 0 ms 2024-12-03T23:31:45,688 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,1,1733268644461.a2ce8b5750bad6679883aabcdedd0e0e. 2024-12-03T23:31:45,688 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,,1733268644461.5cb7960ee15b899fc682c05cfcfa76be. 
2024-12-03T23:31:45,693 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithChecksum/5cb7960ee15b899fc682c05cfcfa76be/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-03T23:31:45,693 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-03T23:31:45,694 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,,1733268644461.5cb7960ee15b899fc682c05cfcfa76be. 2024-12-03T23:31:45,694 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] regionserver.HRegion(1676): Region close journal for 5cb7960ee15b899fc682c05cfcfa76be: Waiting for close lock at 1733268705688Running coprocessor pre-close hooks at 1733268705688Disabling compacts and flushes for region at 1733268705688Disabling writes for close at 1733268705688Writing region close event to WAL at 1733268705689 (+1 ms)Running coprocessor post-close hooks at 1733268705693 (+4 ms)Closed at 1733268705694 (+1 ms) 2024-12-03T23:31:45,695 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithChecksum/a2ce8b5750bad6679883aabcdedd0e0e/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-03T23:31:45,695 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-03T23:31:45,696 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,1,1733268644461.a2ce8b5750bad6679883aabcdedd0e0e. 
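As each region closes it writes a recovered.edits/<n>.seqid marker (here 9.seqid with newMaxSeqId=9) under its directory, so a later open knows the highest sequence id already persisted. A minimal sketch of inspecting those markers with the Hadoop FileSystem API; the HDFS URI and region path below are the test-run-specific values copied from this log, so substitute your own:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ListSeqIdMarkers {
      public static void main(String[] args) throws Exception {
        // Region directory taken verbatim from the log; it only exists inside this test run.
        Path regionDir = new Path(
            "hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b"
            + "/data/default/testtb-testExportWithChecksum/5cb7960ee15b899fc682c05cfcfa76be");
        FileSystem fs = regionDir.getFileSystem(new Configuration());
        for (FileStatus st : fs.listStatus(new Path(regionDir, "recovered.edits"))) {
          // Expect a single marker such as 9.seqid, recording newMaxSeqId=9 at close time.
          System.out.println(st.getPath().getName() + " (" + st.getLen() + " bytes)");
        }
      }
    }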
2024-12-03T23:31:45,696 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] regionserver.HRegion(1676): Region close journal for a2ce8b5750bad6679883aabcdedd0e0e: Waiting for close lock at 1733268705688Running coprocessor pre-close hooks at 1733268705688Disabling compacts and flushes for region at 1733268705688Disabling writes for close at 1733268705688Writing region close event to WAL at 1733268705689 (+1 ms)Running coprocessor post-close hooks at 1733268705695 (+6 ms)Closed at 1733268705696 (+1 ms) 2024-12-03T23:31:45,696 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] handler.UnassignRegionHandler(157): Closed 5cb7960ee15b899fc682c05cfcfa76be 2024-12-03T23:31:45,697 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=229 updating hbase:meta row=5cb7960ee15b899fc682c05cfcfa76be, regionState=CLOSED 2024-12-03T23:31:45,699 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] handler.UnassignRegionHandler(157): Closed a2ce8b5750bad6679883aabcdedd0e0e 2024-12-03T23:31:45,699 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=232, ppid=229, state=RUNNABLE, hasLock=false; CloseRegionProcedure 5cb7960ee15b899fc682c05cfcfa76be, server=ed8014902436,39085,1733268370401 because future has completed 2024-12-03T23:31:45,699 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=230 updating hbase:meta row=a2ce8b5750bad6679883aabcdedd0e0e, regionState=CLOSED 2024-12-03T23:31:45,701 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=231, ppid=230, state=RUNNABLE, hasLock=false; CloseRegionProcedure a2ce8b5750bad6679883aabcdedd0e0e, server=ed8014902436,41121,1733268370525 because future has completed 2024-12-03T23:31:45,703 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=232, resume processing ppid=229 2024-12-03T23:31:45,703 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=232, ppid=229, state=SUCCESS, hasLock=false; CloseRegionProcedure 5cb7960ee15b899fc682c05cfcfa76be, server=ed8014902436,39085,1733268370401 in 177 msec 2024-12-03T23:31:45,704 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=231, resume processing ppid=230 2024-12-03T23:31:45,704 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=229, ppid=228, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=5cb7960ee15b899fc682c05cfcfa76be, UNASSIGN in 209 msec 2024-12-03T23:31:45,704 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=231, ppid=230, state=SUCCESS, hasLock=false; CloseRegionProcedure a2ce8b5750bad6679883aabcdedd0e0e, server=ed8014902436,41121,1733268370525 in 191 msec 2024-12-03T23:31:45,706 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=230, resume processing ppid=228 2024-12-03T23:31:45,706 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=230, ppid=228, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=a2ce8b5750bad6679883aabcdedd0e0e, UNASSIGN in 210 msec 2024-12-03T23:31:45,708 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=228, resume processing ppid=227 2024-12-03T23:31:45,708 INFO [PEWorker-4 {}] 
procedure2.ProcedureExecutor(1521): Finished pid=228, ppid=227, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithChecksum in 215 msec 2024-12-03T23:31:45,709 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733268705709"}]},"ts":"1733268705709"} 2024-12-03T23:31:45,710 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=DISABLED in hbase:meta 2024-12-03T23:31:45,710 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithChecksum to state=DISABLED 2024-12-03T23:31:45,712 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=227, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithChecksum in 228 msec 2024-12-03T23:31:45,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=227 2024-12-03T23:31:45,810 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithChecksum completed 2024-12-03T23:31:45,811 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportWithChecksum 2024-12-03T23:31:45,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] procedure2.ProcedureExecutor(1139): Stored pid=233, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-03T23:31:45,815 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=233, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-03T23:31:45,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithChecksum 2024-12-03T23:31:45,816 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=233, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-03T23:31:45,820 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37351 {}] ipc.CallRunner(138): callId: 414 service: ClientService methodName: Mutate size: 159 connection: 172.17.0.2:42747 deadline: 1733268765817, exception=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=ed8014902436 port=39085 startCode=1733268370401. As of locationSeqNum=28. 2024-12-03T23:31:45,821 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncRegionLocatorHelper(64): Try updating region=hbase:acl,,1733268373530.94ece278d222af9bae70064026dc24ae., hostname=ed8014902436,37351,1733268370600, seqNum=2 , the old value is region=hbase:acl,,1733268373530.94ece278d222af9bae70064026dc24ae., hostname=ed8014902436,37351,1733268370600, seqNum=2, error=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=ed8014902436 port=39085 startCode=1733268370401. As of locationSeqNum=28. 
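With the DisableTableProcedure finished, the client issues the delete, which the master stores as DeleteTableProcedure pid=233 and the client then polls until it reports success. A minimal sketch of that client-side pattern, assuming the standard HBase 2.x Admin API (illustrative only, not the test's actual code):

    import java.util.concurrent.Future;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DeleteTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("testtb-testExportWithChecksum");
          // The table must already be disabled, as it is at this point in the log.
          Future<Void> pending = admin.deleteTableAsync(table);
          // get() corresponds to the repeated "Checking to see if procedure is done" polling above.
          pending.get();
        }
      }
    }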
2024-12-03T23:31:45,821 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=hbase:acl,,1733268373530.94ece278d222af9bae70064026dc24ae., hostname=ed8014902436,37351,1733268370600, seqNum=2 is org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=ed8014902436 port=39085 startCode=1733268370401. As of locationSeqNum=28. 2024-12-03T23:31:45,821 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncRegionLocatorHelper(84): Try updating region=hbase:acl,,1733268373530.94ece278d222af9bae70064026dc24ae., hostname=ed8014902436,37351,1733268370600, seqNum=2 with the new location region=hbase:acl,,1733268373530.94ece278d222af9bae70064026dc24ae., hostname=ed8014902436,39085,1733268370401, seqNum=28 constructed by org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=ed8014902436 port=39085 startCode=1733268370401. As of locationSeqNum=28. 2024-12-03T23:31:45,829 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithChecksum/5cb7960ee15b899fc682c05cfcfa76be 2024-12-03T23:31:45,829 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithChecksum/a2ce8b5750bad6679883aabcdedd0e0e 2024-12-03T23:31:45,831 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithChecksum/5cb7960ee15b899fc682c05cfcfa76be/cf, FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithChecksum/5cb7960ee15b899fc682c05cfcfa76be/recovered.edits] 2024-12-03T23:31:45,831 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithChecksum/a2ce8b5750bad6679883aabcdedd0e0e/cf, FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithChecksum/a2ce8b5750bad6679883aabcdedd0e0e/recovered.edits] 2024-12-03T23:31:45,840 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithChecksum/a2ce8b5750bad6679883aabcdedd0e0e/cf/a22020723917458497acef23688b0740 to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testtb-testExportWithChecksum/a2ce8b5750bad6679883aabcdedd0e0e/cf/a22020723917458497acef23688b0740 2024-12-03T23:31:45,840 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithChecksum/5cb7960ee15b899fc682c05cfcfa76be/cf/5eca9104720d4e358ba2e42844ff7f8b to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testtb-testExportWithChecksum/5cb7960ee15b899fc682c05cfcfa76be/cf/5eca9104720d4e358ba2e42844ff7f8b 2024-12-03T23:31:45,842 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithChecksum/5cb7960ee15b899fc682c05cfcfa76be/recovered.edits/9.seqid to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testtb-testExportWithChecksum/5cb7960ee15b899fc682c05cfcfa76be/recovered.edits/9.seqid 2024-12-03T23:31:45,842 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithChecksum/a2ce8b5750bad6679883aabcdedd0e0e/recovered.edits/9.seqid to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testtb-testExportWithChecksum/a2ce8b5750bad6679883aabcdedd0e0e/recovered.edits/9.seqid 2024-12-03T23:31:45,843 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithChecksum/5cb7960ee15b899fc682c05cfcfa76be 2024-12-03T23:31:45,843 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportWithChecksum/a2ce8b5750bad6679883aabcdedd0e0e 2024-12-03T23:31:45,843 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithChecksum regions 2024-12-03T23:31:45,843 DEBUG [PEWorker-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6 2024-12-03T23:31:45,844 DEBUG [PEWorker-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf] 2024-12-03T23:31:45,846 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241203e8d8894ff75b4eedb0440aa05441b8f0_a2ce8b5750bad6679883aabcdedd0e0e to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241203e8d8894ff75b4eedb0440aa05441b8f0_a2ce8b5750bad6679883aabcdedd0e0e 2024-12-03T23:31:45,847 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e20241203656047715ed7412186c94036ba514ccc_5cb7960ee15b899fc682c05cfcfa76be to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e20241203656047715ed7412186c94036ba514ccc_5cb7960ee15b899fc682c05cfcfa76be 2024-12-03T23:31:45,848 DEBUG [PEWorker-5 {}] backup.HFileArchiver(610): Deleted 
hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6 2024-12-03T23:31:45,850 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=233, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-03T23:31:45,852 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithChecksum from hbase:meta 2024-12-03T23:31:45,854 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithChecksum' descriptor. 2024-12-03T23:31:45,855 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=233, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-03T23:31:45,855 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithChecksum' from region states. 2024-12-03T23:31:45,855 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum,,1733268644461.5cb7960ee15b899fc682c05cfcfa76be.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733268705855"}]},"ts":"9223372036854775807"} 2024-12-03T23:31:45,855 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum,1,1733268644461.a2ce8b5750bad6679883aabcdedd0e0e.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733268705855"}]},"ts":"9223372036854775807"} 2024-12-03T23:31:45,857 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-03T23:31:45,857 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 5cb7960ee15b899fc682c05cfcfa76be, NAME => 'testtb-testExportWithChecksum,,1733268644461.5cb7960ee15b899fc682c05cfcfa76be.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => a2ce8b5750bad6679883aabcdedd0e0e, NAME => 'testtb-testExportWithChecksum,1,1733268644461.a2ce8b5750bad6679883aabcdedd0e0e.', STARTKEY => '1', ENDKEY => ''}] 2024-12-03T23:31:45,857 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithChecksum' as deleted. 
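At this point the region and MOB directories have been archived rather than removed, the hbase:meta rows are gone, and the table is marked deleted; a few lines further down the master also drops the emptySnaptb0-/snaptb0-testExportWithChecksum snapshots. A minimal sketch of the corresponding client-side verification and snapshot cleanup, assuming the standard HBase 2.x Admin API (illustrative only):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CleanupSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("testtb-testExportWithChecksum");
          // Expected to print false once the DeleteTableProcedure above has finished.
          System.out.println("table still exists: " + admin.tableExists(table));
          // Snapshot names taken from the delete requests later in this log.
          admin.deleteSnapshot("emptySnaptb0-testExportWithChecksum");
          admin.deleteSnapshot("snaptb0-testExportWithChecksum");
        }
      }
    }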
2024-12-03T23:31:45,857 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733268705857"}]},"ts":"9223372036854775807"} 2024-12-03T23:31:45,859 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithChecksum state from META 2024-12-03T23:31:45,860 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=233, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-03T23:31:45,861 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=233, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithChecksum in 49 msec 2024-12-03T23:31:45,929 DEBUG [Async-Client-Retry-Timer-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T23:31:45,931 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42059, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T23:31:45,933 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39085 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T23:31:45,933 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39085 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ed8014902436,37351,1733268370600, seqNum=-1] 2024-12-03T23:31:45,933 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39085 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T23:31:45,934 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45955, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=ClientService 2024-12-03T23:31:45,935 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithChecksum', locateType=CURRENT is [region=hbase:acl,,1733268373530.94ece278d222af9bae70064026dc24ae., hostname=ed8014902436,39085,1733268370401, seqNum=32] 2024-12-03T23:31:45,936 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T23:31:45,937 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48839, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=ClientService 2024-12-03T23:31:45,939 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39085 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithChecksum 2024-12-03T23:31:45,992 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-03T23:31:45,992 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, 
path=/hbase/acl/testtb-testExportWithChecksum 2024-12-03T23:31:45,992 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-03T23:31:45,992 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-03T23:31:45,993 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-03T23:31:45,993 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-03T23:31:45,993 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-03T23:31:45,993 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-03T23:31:46,001 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-03T23:31:46,001 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-03T23:31:46,001 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-03T23:31:46,001 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-03T23:31:46,001 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:31:46,001 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:31:46,001 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:31:46,001 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:31:46,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] 
master.MasterRpcServices(1377): Checking to see if procedure is done pid=233 2024-12-03T23:31:46,002 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-03T23:31:46,002 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-03T23:31:46,002 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithChecksum 2024-12-03T23:31:46,002 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithChecksum completed 2024-12-03T23:31:46,002 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-03T23:31:46,003 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-03T23:31:46,007 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithChecksum" type: DISABLED 2024-12-03T23:31:46,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithChecksum 2024-12-03T23:31:46,009 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithChecksum" type: DISABLED 2024-12-03T23:31:46,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithChecksum 2024-12-03T23:31:46,034 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportWithChecksum Thread=825 (was 820) Potentially hanging thread: RSProcedureDispatcher-pool-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #17 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-16 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #18 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
Thread-9077 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: process reaper (pid 161583) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (790252428) connection to localhost/127.0.0.1:43247 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-17 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #15 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Potentially hanging thread: HFileArchiver-24 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43247 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-25 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-919726820_22 at /127.0.0.1:60956 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool.commonPool-worker-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: ContainersLauncher #4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-919726820_22 at /127.0.0.1:43472 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #16 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-18 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool.commonPool-worker-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: 
ContainersLauncher #7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1710628548_1 at /127.0.0.1:60930 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-19 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=826 (was 817) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=753 (was 611) - SystemLoadAverage LEAK? -, ProcessCount=24 (was 17) - ProcessCount LEAK? -, AvailableMemoryMB=1546 (was 2088) 2024-12-03T23:31:46,034 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=825 is superior to 500 2024-12-03T23:31:46,056 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithSkipTmp Thread=825, OpenFileDescriptor=826, MaxFileDescriptor=1048576, SystemLoadAverage=753, ProcessCount=24, AvailableMemoryMB=1545 2024-12-03T23:31:46,056 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=825 is superior to 500 2024-12-03T23:31:46,058 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-03T23:31:46,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] procedure2.ProcedureExecutor(1139): Stored pid=234, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-03T23:31:46,064 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=234, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_PRE_OPERATION 2024-12-03T23:31:46,065 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithSkipTmp" procId is: 234 2024-12-03T23:31:46,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=234 2024-12-03T23:31:46,066 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=234, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-03T23:31:46,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742413_1589 (size=454) 2024-12-03T23:31:46,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742413_1589 (size=454) 2024-12-03T23:31:46,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742413_1589 (size=454) 2024-12-03T23:31:46,076 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 8c30395de41b86b7edd28d09950bac76, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1733268706057.8c30395de41b86b7edd28d09950bac76.', 
STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b 2024-12-03T23:31:46,077 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 48e5139804a581d94c2242cd268c9525, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1733268706057.48e5139804a581d94c2242cd268c9525.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b 2024-12-03T23:31:46,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742414_1590 (size=79) 2024-12-03T23:31:46,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742414_1590 (size=79) 2024-12-03T23:31:46,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742414_1590 (size=79) 2024-12-03T23:31:46,088 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,,1733268706057.8c30395de41b86b7edd28d09950bac76.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T23:31:46,088 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1722): Closing 8c30395de41b86b7edd28d09950bac76, disabling compactions & flushes 2024-12-03T23:31:46,088 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,,1733268706057.8c30395de41b86b7edd28d09950bac76. 2024-12-03T23:31:46,088 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,,1733268706057.8c30395de41b86b7edd28d09950bac76. 2024-12-03T23:31:46,088 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,,1733268706057.8c30395de41b86b7edd28d09950bac76. 
after waiting 0 ms 2024-12-03T23:31:46,088 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,,1733268706057.8c30395de41b86b7edd28d09950bac76. 2024-12-03T23:31:46,088 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,,1733268706057.8c30395de41b86b7edd28d09950bac76. 2024-12-03T23:31:46,088 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1676): Region close journal for 8c30395de41b86b7edd28d09950bac76: Waiting for close lock at 1733268706088Disabling compacts and flushes for region at 1733268706088Disabling writes for close at 1733268706088Writing region close event to WAL at 1733268706088Closed at 1733268706088 2024-12-03T23:31:46,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742415_1591 (size=79) 2024-12-03T23:31:46,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742415_1591 (size=79) 2024-12-03T23:31:46,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742415_1591 (size=79) 2024-12-03T23:31:46,092 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,1,1733268706057.48e5139804a581d94c2242cd268c9525.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T23:31:46,093 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1722): Closing 48e5139804a581d94c2242cd268c9525, disabling compactions & flushes 2024-12-03T23:31:46,093 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,1,1733268706057.48e5139804a581d94c2242cd268c9525. 2024-12-03T23:31:46,093 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1733268706057.48e5139804a581d94c2242cd268c9525. 2024-12-03T23:31:46,093 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1733268706057.48e5139804a581d94c2242cd268c9525. after waiting 0 ms 2024-12-03T23:31:46,093 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,1,1733268706057.48e5139804a581d94c2242cd268c9525. 2024-12-03T23:31:46,093 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,1,1733268706057.48e5139804a581d94c2242cd268c9525. 
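The create-table request logged above carries the full descriptor for testtb-testExportFileSystemStateWithSkipTmp: a single MOB-enabled family 'cf' (MOB_THRESHOLD => '0', VERSIONS => '1') and a split at row key '1', which is what produces the two regions 8c30395de41b86b7edd28d09950bac76 and 48e5139804a581d94c2242cd268c9525. A minimal client-side sketch of issuing an equivalent request through the public Admin API is given below; the class name and the Configuration/connection setup are illustrative assumptions, not the code this test actually runs.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml on the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
      // Single family 'cf', MOB enabled with threshold 0 and one version, as in the logged descriptor.
      TableDescriptorBuilder table = TableDescriptorBuilder.newBuilder(tn)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMobEnabled(true)
              .setMobThreshold(0L)
              .setMaxVersions(1)
              .build());
      // One split key '1' yields the two regions seen above: [,1) and [1,).
      byte[][] splits = new byte[][] { Bytes.toBytes("1") };
      admin.createTable(table.build(), splits);
    }
  }
}

The master then drives the CreateTableProcedure state machine visible in the log (CREATE_TABLE_PRE_OPERATION, WRITE_FS_LAYOUT, ADD_TO_META, ASSIGN_REGIONS).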
2024-12-03T23:31:46,093 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1676): Region close journal for 48e5139804a581d94c2242cd268c9525: Waiting for close lock at 1733268706093Disabling compacts and flushes for region at 1733268706093Disabling writes for close at 1733268706093Writing region close event to WAL at 1733268706093Closed at 1733268706093 2024-12-03T23:31:46,094 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=234, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_ADD_TO_META 2024-12-03T23:31:46,094 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSkipTmp,,1733268706057.8c30395de41b86b7edd28d09950bac76.","families":{"info":[{"qualifier":"regioninfo","vlen":78,"tag":[],"timestamp":"1733268706094"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733268706094"}]},"ts":"1733268706094"} 2024-12-03T23:31:46,094 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSkipTmp,1,1733268706057.48e5139804a581d94c2242cd268c9525.","families":{"info":[{"qualifier":"regioninfo","vlen":78,"tag":[],"timestamp":"1733268706094"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733268706094"}]},"ts":"1733268706094"} 2024-12-03T23:31:46,096 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-03T23:31:46,097 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=234, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-03T23:31:46,097 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733268706097"}]},"ts":"1733268706097"} 2024-12-03T23:31:46,098 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=ENABLING in hbase:meta 2024-12-03T23:31:46,099 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {ed8014902436=0} racks are {/default-rack=0} 2024-12-03T23:31:46,100 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-03T23:31:46,100 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-03T23:31:46,100 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-03T23:31:46,100 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-03T23:31:46,100 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-03T23:31:46,100 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-03T23:31:46,100 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-03T23:31:46,100 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-03T23:31:46,100 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-03T23:31:46,100 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-03T23:31:46,101 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=235, 
ppid=234, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=8c30395de41b86b7edd28d09950bac76, ASSIGN}, {pid=236, ppid=234, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=48e5139804a581d94c2242cd268c9525, ASSIGN}] 2024-12-03T23:31:46,102 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=235, ppid=234, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=8c30395de41b86b7edd28d09950bac76, ASSIGN 2024-12-03T23:31:46,102 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=236, ppid=234, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=48e5139804a581d94c2242cd268c9525, ASSIGN 2024-12-03T23:31:46,103 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=236, ppid=234, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=48e5139804a581d94c2242cd268c9525, ASSIGN; state=OFFLINE, location=ed8014902436,41121,1733268370525; forceNewPlan=false, retain=false 2024-12-03T23:31:46,103 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=235, ppid=234, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=8c30395de41b86b7edd28d09950bac76, ASSIGN; state=OFFLINE, location=ed8014902436,37351,1733268370600; forceNewPlan=false, retain=false 2024-12-03T23:31:46,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=234 2024-12-03T23:31:46,253 INFO [ed8014902436:43753 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
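At this point the master has picked assignment targets for both regions and queued OpenRegionProcedure subtasks; from the client side there is nothing to do but wait for the table to become available (the test's own Waiter utility appears further down, "Waiting up to [60,000] milli-secs"). A hypothetical polling helper built on Admin.isTableAvailable, for illustration only and not the utility the test uses:

import java.util.concurrent.TimeoutException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

// Hypothetical helper: poll until the table's regions are assigned and open,
// or fail after timeoutMs.
public final class WaitForTableSketch {
  public static void waitUntilAvailable(Admin admin, TableName tn, long timeoutMs)
      throws Exception {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (!admin.isTableAvailable(tn)) {
      if (System.currentTimeMillis() > deadline) {
        throw new TimeoutException("Table " + tn + " still not available");
      }
      Thread.sleep(200);
    }
  }
}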
2024-12-03T23:31:46,254 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=235 updating hbase:meta row=8c30395de41b86b7edd28d09950bac76, regionState=OPENING, regionLocation=ed8014902436,37351,1733268370600 2024-12-03T23:31:46,254 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=236 updating hbase:meta row=48e5139804a581d94c2242cd268c9525, regionState=OPENING, regionLocation=ed8014902436,41121,1733268370525 2024-12-03T23:31:46,255 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=235, ppid=234, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=8c30395de41b86b7edd28d09950bac76, ASSIGN because future has completed 2024-12-03T23:31:46,256 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=237, ppid=235, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8c30395de41b86b7edd28d09950bac76, server=ed8014902436,37351,1733268370600}] 2024-12-03T23:31:46,256 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=236, ppid=234, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=48e5139804a581d94c2242cd268c9525, ASSIGN because future has completed 2024-12-03T23:31:46,257 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=238, ppid=236, state=RUNNABLE, hasLock=false; OpenRegionProcedure 48e5139804a581d94c2242cd268c9525, server=ed8014902436,41121,1733268370525}] 2024-12-03T23:31:46,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=234 2024-12-03T23:31:46,410 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSkipTmp,,1733268706057.8c30395de41b86b7edd28d09950bac76. 2024-12-03T23:31:46,410 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(7752): Opening region: {ENCODED => 8c30395de41b86b7edd28d09950bac76, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1733268706057.8c30395de41b86b7edd28d09950bac76.', STARTKEY => '', ENDKEY => '1'} 2024-12-03T23:31:46,410 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSkipTmp,1,1733268706057.48e5139804a581d94c2242cd268c9525. 2024-12-03T23:31:46,410 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(7752): Opening region: {ENCODED => 48e5139804a581d94c2242cd268c9525, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1733268706057.48e5139804a581d94c2242cd268c9525.', STARTKEY => '1', ENDKEY => ''} 2024-12-03T23:31:46,410 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSkipTmp,,1733268706057.8c30395de41b86b7edd28d09950bac76. 
service=AccessControlService 2024-12-03T23:31:46,410 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSkipTmp,1,1733268706057.48e5139804a581d94c2242cd268c9525. service=AccessControlService 2024-12-03T23:31:46,410 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-03T23:31:46,410 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-03T23:31:46,411 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSkipTmp 8c30395de41b86b7edd28d09950bac76 2024-12-03T23:31:46,411 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSkipTmp 48e5139804a581d94c2242cd268c9525 2024-12-03T23:31:46,411 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,1,1733268706057.48e5139804a581d94c2242cd268c9525.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T23:31:46,411 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,,1733268706057.8c30395de41b86b7edd28d09950bac76.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T23:31:46,411 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(7794): checking encryption for 8c30395de41b86b7edd28d09950bac76 2024-12-03T23:31:46,411 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(7794): checking encryption for 48e5139804a581d94c2242cd268c9525 2024-12-03T23:31:46,411 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(7797): checking classloading for 8c30395de41b86b7edd28d09950bac76 2024-12-03T23:31:46,411 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(7797): checking classloading for 48e5139804a581d94c2242cd268c9525 2024-12-03T23:31:46,412 INFO [StoreOpener-8c30395de41b86b7edd28d09950bac76-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 8c30395de41b86b7edd28d09950bac76 2024-12-03T23:31:46,412 INFO [StoreOpener-48e5139804a581d94c2242cd268c9525-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, 
cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 48e5139804a581d94c2242cd268c9525 2024-12-03T23:31:46,413 INFO [StoreOpener-48e5139804a581d94c2242cd268c9525-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 48e5139804a581d94c2242cd268c9525 columnFamilyName cf 2024-12-03T23:31:46,413 INFO [StoreOpener-8c30395de41b86b7edd28d09950bac76-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8c30395de41b86b7edd28d09950bac76 columnFamilyName cf 2024-12-03T23:31:46,414 DEBUG [StoreOpener-8c30395de41b86b7edd28d09950bac76-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:31:46,414 DEBUG [StoreOpener-48e5139804a581d94c2242cd268c9525-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:31:46,414 INFO [StoreOpener-8c30395de41b86b7edd28d09950bac76-1 {}] regionserver.HStore(327): Store=8c30395de41b86b7edd28d09950bac76/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T23:31:46,414 INFO [StoreOpener-48e5139804a581d94c2242cd268c9525-1 {}] regionserver.HStore(327): Store=48e5139804a581d94c2242cd268c9525/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T23:31:46,414 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(1038): replaying wal for 48e5139804a581d94c2242cd268c9525 2024-12-03T23:31:46,414 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(1038): replaying wal for 8c30395de41b86b7edd28d09950bac76 2024-12-03T23:31:46,415 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithSkipTmp/8c30395de41b86b7edd28d09950bac76 2024-12-03T23:31:46,415 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithSkipTmp/48e5139804a581d94c2242cd268c9525 2024-12-03T23:31:46,415 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithSkipTmp/8c30395de41b86b7edd28d09950bac76 2024-12-03T23:31:46,415 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithSkipTmp/48e5139804a581d94c2242cd268c9525 2024-12-03T23:31:46,415 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(1048): stopping wal replay for 8c30395de41b86b7edd28d09950bac76 2024-12-03T23:31:46,415 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(1048): stopping wal replay for 48e5139804a581d94c2242cd268c9525 2024-12-03T23:31:46,415 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(1060): Cleaning up temporary data for 48e5139804a581d94c2242cd268c9525 2024-12-03T23:31:46,415 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(1060): Cleaning up temporary data for 8c30395de41b86b7edd28d09950bac76 2024-12-03T23:31:46,416 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(1093): writing seq id for 48e5139804a581d94c2242cd268c9525 2024-12-03T23:31:46,416 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(1093): writing seq id for 8c30395de41b86b7edd28d09950bac76 2024-12-03T23:31:46,417 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithSkipTmp/8c30395de41b86b7edd28d09950bac76/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T23:31:46,418 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithSkipTmp/48e5139804a581d94c2242cd268c9525/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T23:31:46,418 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(1114): Opened 48e5139804a581d94c2242cd268c9525; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73982093, 
jitterRate=0.10241909325122833}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-03T23:31:46,418 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(1114): Opened 8c30395de41b86b7edd28d09950bac76; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=58994389, jitterRate=-0.12091509997844696}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-03T23:31:46,418 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 48e5139804a581d94c2242cd268c9525 2024-12-03T23:31:46,418 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 8c30395de41b86b7edd28d09950bac76 2024-12-03T23:31:46,418 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(1006): Region open journal for 48e5139804a581d94c2242cd268c9525: Running coprocessor pre-open hook at 1733268706411Writing region info on filesystem at 1733268706411Initializing all the Stores at 1733268706411Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733268706411Cleaning up temporary data from old regions at 1733268706415 (+4 ms)Running coprocessor post-open hooks at 1733268706418 (+3 ms)Region opened successfully at 1733268706418 2024-12-03T23:31:46,418 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(1006): Region open journal for 8c30395de41b86b7edd28d09950bac76: Running coprocessor pre-open hook at 1733268706411Writing region info on filesystem at 1733268706411Initializing all the Stores at 1733268706411Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733268706411Cleaning up temporary data from old regions at 1733268706415 (+4 ms)Running coprocessor post-open hooks at 1733268706418 (+3 ms)Region opened successfully at 1733268706418 2024-12-03T23:31:46,419 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSkipTmp,,1733268706057.8c30395de41b86b7edd28d09950bac76., pid=237, masterSystemTime=1733268706407 2024-12-03T23:31:46,419 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSkipTmp,1,1733268706057.48e5139804a581d94c2242cd268c9525., pid=238, masterSystemTime=1733268706408 2024-12-03T23:31:46,421 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] 
regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSkipTmp,1,1733268706057.48e5139804a581d94c2242cd268c9525. 2024-12-03T23:31:46,421 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSkipTmp,1,1733268706057.48e5139804a581d94c2242cd268c9525. 2024-12-03T23:31:46,421 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=236 updating hbase:meta row=48e5139804a581d94c2242cd268c9525, regionState=OPEN, openSeqNum=2, regionLocation=ed8014902436,41121,1733268370525 2024-12-03T23:31:46,421 DEBUG [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSkipTmp,,1733268706057.8c30395de41b86b7edd28d09950bac76. 2024-12-03T23:31:46,421 INFO [RS_OPEN_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSkipTmp,,1733268706057.8c30395de41b86b7edd28d09950bac76. 2024-12-03T23:31:46,422 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=235 updating hbase:meta row=8c30395de41b86b7edd28d09950bac76, regionState=OPEN, openSeqNum=2, regionLocation=ed8014902436,37351,1733268370600 2024-12-03T23:31:46,422 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=238, ppid=236, state=RUNNABLE, hasLock=false; OpenRegionProcedure 48e5139804a581d94c2242cd268c9525, server=ed8014902436,41121,1733268370525 because future has completed 2024-12-03T23:31:46,423 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=237, ppid=235, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8c30395de41b86b7edd28d09950bac76, server=ed8014902436,37351,1733268370600 because future has completed 2024-12-03T23:31:46,425 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=238, resume processing ppid=236 2024-12-03T23:31:46,425 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=238, ppid=236, state=SUCCESS, hasLock=false; OpenRegionProcedure 48e5139804a581d94c2242cd268c9525, server=ed8014902436,41121,1733268370525 in 166 msec 2024-12-03T23:31:46,426 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=236, ppid=234, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=48e5139804a581d94c2242cd268c9525, ASSIGN in 325 msec 2024-12-03T23:31:46,426 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=237, resume processing ppid=235 2024-12-03T23:31:46,426 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=237, ppid=235, state=SUCCESS, hasLock=false; OpenRegionProcedure 8c30395de41b86b7edd28d09950bac76, server=ed8014902436,37351,1733268370600 in 169 msec 2024-12-03T23:31:46,429 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=235, resume processing ppid=234 2024-12-03T23:31:46,429 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=235, ppid=234, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=8c30395de41b86b7edd28d09950bac76, ASSIGN in 326 msec 2024-12-03T23:31:46,430 INFO [PEWorker-1 {}] 
procedure.CreateTableProcedure(89): pid=234, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-03T23:31:46,430 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733268706430"}]},"ts":"1733268706430"} 2024-12-03T23:31:46,431 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=ENABLED in hbase:meta 2024-12-03T23:31:46,433 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=234, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_POST_OPERATION 2024-12-03T23:31:46,433 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithSkipTmp jenkins: RWXCA 2024-12-03T23:31:46,437 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39085 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-12-03T23:31:46,492 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:31:46,492 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:31:46,492 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:31:46,492 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:31:46,502 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-03T23:31:46,502 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-03T23:31:46,503 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-03T23:31:46,503 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-03T23:31:46,503 DEBUG 
[zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-03T23:31:46,503 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-03T23:31:46,503 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-03T23:31:46,503 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-03T23:31:46,504 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=234, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 443 msec 2024-12-03T23:31:46,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=234 2024-12-03T23:31:46,689 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-12-03T23:31:46,689 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-03T23:31:46,694 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithSkipTmp 2024-12-03T23:31:46,694 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithSkipTmp,,1733268706057.8c30395de41b86b7edd28d09950bac76. 
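The PermissionStorage and ZKPermissionWatcher entries above record the AccessController post-create hook granting the creating user full rights on the new table (entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA]) and pushing that ACL to every watcher. Granting the same table-wide RWXCA permissions explicitly from a client would look roughly like the sketch below; it is illustrative only and assumes a cluster with the AccessController coprocessor enabled, as in this test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantTablePermsSketch {
  public static void main(String[] args) throws Throwable {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      // Grant read/write/exec/create/admin (RWXCA) on the whole table to user 'jenkins',
      // mirroring the hbase:acl entry written above. Null family/qualifier = table-wide.
      AccessControlClient.grant(conn,
          TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"),
          "jenkins", null, null,
          Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
          Permission.Action.CREATE, Permission.Action.ADMIN);
    }
  }
}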
2024-12-03T23:31:46,694 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-03T23:31:46,696 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-03T23:31:46,701 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-03T23:31:46,707 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-03T23:31:46,709 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-03T23:31:46,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733268706709 (current time:1733268706709). 2024-12-03T23:31:46,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-03T23:31:46,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp VERSION not specified, setting to 2 2024-12-03T23:31:46,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-03T23:31:46,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6789cd4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:31:46,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] client.ClusterIdFetcher(90): Going to request ed8014902436,43753,-1 for getting cluster id 2024-12-03T23:31:46,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T23:31:46,716 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '365bf15c-9cbe-4838-a767-ac7274dcdbe4' 2024-12-03T23:31:46,716 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T23:31:46,716 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "365bf15c-9cbe-4838-a767-ac7274dcdbe4" 2024-12-03T23:31:46,717 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@507249f8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:31:46,717 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [ed8014902436,43753,-1] 2024-12-03T23:31:46,717 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T23:31:46,717 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:31:46,719 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59346, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T23:31:46,719 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@64f8b4b6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:31:46,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T23:31:46,720 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ed8014902436,37351,1733268370600, seqNum=-1] 2024-12-03T23:31:46,721 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T23:31:46,722 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56640, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T23:31:46,723 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753. 
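The "Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,," entries above are the client resolving the new table's regions and their locations (HBaseTestingUtil then reports "Found 2 regions"). A comparable lookup through the public RegionLocator API, again as an illustrative sketch under the same assumed connection setup:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class ListRegionLocationsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(tn)) {
      // Each entry pairs a RegionInfo (encoded name, start/end key) with its hosting server,
      // e.g. 8c30395de41b86b7edd28d09950bac76 and 48e5139804a581d94c2242cd268c9525 above.
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
      }
    }
  }
}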
2024-12-03T23:31:46,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-03T23:31:46,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:31:46,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:31:46,723 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-03T23:31:46,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@164fd563, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:31:46,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] client.ClusterIdFetcher(90): Going to request ed8014902436,43753,-1 for getting cluster id 2024-12-03T23:31:46,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T23:31:46,725 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '365bf15c-9cbe-4838-a767-ac7274dcdbe4' 2024-12-03T23:31:46,725 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T23:31:46,725 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "365bf15c-9cbe-4838-a767-ac7274dcdbe4" 2024-12-03T23:31:46,725 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@8523bcc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:31:46,725 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [ed8014902436,43753,-1] 2024-12-03T23:31:46,725 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T23:31:46,725 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:31:46,726 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59366, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T23:31:46,727 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@bad8c32, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:31:46,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T23:31:46,729 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ed8014902436,37351,1733268370600, seqNum=-1] 2024-12-03T23:31:46,729 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T23:31:46,730 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56644, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T23:31:46,732 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSkipTmp', locateType=CURRENT is [region=hbase:acl,,1733268373530.94ece278d222af9bae70064026dc24ae., hostname=ed8014902436,39085,1733268370401, seqNum=32] 2024-12-03T23:31:46,732 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T23:31:46,733 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59022, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T23:31:46,734 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753. 
2024-12-03T23:31:46,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor256.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-03T23:31:46,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:31:46,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:31:46,735 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-03T23:31:46,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-12-03T23:31:46,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
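Once the ACL check succeeds, the master registers SnapshotProcedure pid=239 for emptySnaptb0-testExportFileSystemStateWithSkipTmp (type=FLUSH), visible just below. From the client side the whole exchange is a single blocking Admin call; a minimal sketch under the same illustrative connection assumptions as the earlier examples:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TakeSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Blocks until the snapshot completes or fails; the master logs the
      // SnapshotProcedure states (SNAPSHOT_PREPARE, ..., SNAPSHOT_SNAPSHOT_ONLINE_REGIONS) seen here.
      admin.snapshot("emptySnaptb0-testExportFileSystemStateWithSkipTmp",
          TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"));
    }
  }
}

The repeated "Checking to see if procedure is done pid=239" entries below are the client polling the master for completion of that call.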
2024-12-03T23:31:46,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] procedure2.ProcedureExecutor(1139): Stored pid=239, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-03T23:31:46,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 239 2024-12-03T23:31:46,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=239 2024-12-03T23:31:46,738 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-03T23:31:46,739 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-03T23:31:46,742 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-03T23:31:46,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742416_1592 (size=203) 2024-12-03T23:31:46,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742416_1592 (size=203) 2024-12-03T23:31:46,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742416_1592 (size=203) 2024-12-03T23:31:46,752 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-03T23:31:46,752 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=240, ppid=239, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 8c30395de41b86b7edd28d09950bac76}, {pid=241, ppid=239, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 48e5139804a581d94c2242cd268c9525}] 2024-12-03T23:31:46,753 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=240, ppid=239, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 8c30395de41b86b7edd28d09950bac76 2024-12-03T23:31:46,753 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for 
pid=241, ppid=239, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 48e5139804a581d94c2242cd268c9525 2024-12-03T23:31:46,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=239 2024-12-03T23:31:46,905 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37351 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=240 2024-12-03T23:31:46,905 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41121 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=241 2024-12-03T23:31:46,905 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1733268706057.48e5139804a581d94c2242cd268c9525. 2024-12-03T23:31:46,905 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1733268706057.8c30395de41b86b7edd28d09950bac76. 2024-12-03T23:31:46,905 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.HRegion(2603): Flush status journal for 48e5139804a581d94c2242cd268c9525: 2024-12-03T23:31:46,905 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.HRegion(2603): Flush status journal for 8c30395de41b86b7edd28d09950bac76: 2024-12-03T23:31:46,905 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,1,1733268706057.48e5139804a581d94c2242cd268c9525. for emptySnaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-03T23:31:46,905 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,,1733268706057.8c30395de41b86b7edd28d09950bac76. for emptySnaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-03T23:31:46,905 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,1,1733268706057.48e5139804a581d94c2242cd268c9525.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-03T23:31:46,905 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,,1733268706057.8c30395de41b86b7edd28d09950bac76.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-03T23:31:46,905 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-03T23:31:46,905 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-03T23:31:46,905 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-03T23:31:46,905 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-03T23:31:46,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742418_1594 (size=82) 2024-12-03T23:31:46,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742418_1594 (size=82) 2024-12-03T23:31:46,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742417_1593 (size=82) 2024-12-03T23:31:46,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742417_1593 (size=82) 2024-12-03T23:31:46,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742417_1593 (size=82) 2024-12-03T23:31:46,938 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1733268706057.48e5139804a581d94c2242cd268c9525. 2024-12-03T23:31:46,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742418_1594 (size=82) 2024-12-03T23:31:46,938 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=241 2024-12-03T23:31:46,938 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1733268706057.8c30395de41b86b7edd28d09950bac76. 
2024-12-03T23:31:46,938 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=240 2024-12-03T23:31:46,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.HMaster(4169): Remote procedure done, pid=240 2024-12-03T23:31:46,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.HMaster(4169): Remote procedure done, pid=241 2024-12-03T23:31:46,939 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp on region 8c30395de41b86b7edd28d09950bac76 2024-12-03T23:31:46,939 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp on region 48e5139804a581d94c2242cd268c9525 2024-12-03T23:31:46,939 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=241, ppid=239, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 48e5139804a581d94c2242cd268c9525 2024-12-03T23:31:46,939 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=240, ppid=239, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 8c30395de41b86b7edd28d09950bac76 2024-12-03T23:31:46,941 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=241, ppid=239, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 48e5139804a581d94c2242cd268c9525 in 187 msec 2024-12-03T23:31:46,942 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=240, resume processing ppid=239 2024-12-03T23:31:46,942 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-03T23:31:46,942 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=240, ppid=239, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 8c30395de41b86b7edd28d09950bac76 in 187 msec 2024-12-03T23:31:46,942 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-03T23:31:46,943 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-03T23:31:46,943 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-03T23:31:46,943 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:31:46,944 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-03T23:31:46,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742419_1595 (size=74) 2024-12-03T23:31:46,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742419_1595 (size=74) 2024-12-03T23:31:46,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742419_1595 (size=74) 2024-12-03T23:31:46,958 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-03T23:31:46,959 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-03T23:31:46,959 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-03T23:31:46,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742420_1596 (size=697) 2024-12-03T23:31:46,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742420_1596 (size=697) 2024-12-03T23:31:46,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742420_1596 (size=697) 2024-12-03T23:31:46,990 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-03T23:31:46,997 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-03T23:31:46,997 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSkipTmp to 
hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-03T23:31:47,001 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-03T23:31:47,001 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 239 2024-12-03T23:31:47,003 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=239, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } in 266 msec 2024-12-03T23:31:47,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=239 2024-12-03T23:31:47,050 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-12-03T23:31:47,059 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37351 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSkipTmp,,1733268706057.8c30395de41b86b7edd28d09950bac76. with WAL disabled. Data may be lost in the event of a crash. 2024-12-03T23:31:47,061 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41121 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSkipTmp,1,1733268706057.48e5139804a581d94c2242cd268c9525. with WAL disabled. Data may be lost in the event of a crash. 2024-12-03T23:31:47,062 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-03T23:31:47,064 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithSkipTmp 2024-12-03T23:31:47,064 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithSkipTmp,,1733268706057.8c30395de41b86b7edd28d09950bac76. 
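Note: the "writing data to region ... with WAL disabled. Data may be lost in the event of a crash." messages above are what HRegion emits when a client mutation carries Durability.SKIP_WAL. Below is a minimal client-side sketch of such a write, assuming a reachable cluster with default configuration; the row key and value are illustrative and not taken from the test (the table name and cf:q column do appear in this log).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SkipWalPutExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(
                 TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"))) {
          Put put = new Put(Bytes.toBytes("row-0"));   // illustrative row key
          put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
          // Skip the write-ahead log; the region server then logs the
          // "Data may be lost in the event of a crash" warning seen above.
          put.setDurability(Durability.SKIP_WAL);
          table.put(put);
        }
      }
    }

Skipping the WAL trades crash safety for write throughput, which is why the region server records the data-loss warning for each such batch.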
2024-12-03T23:31:47,064 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-03T23:31:47,065 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-03T23:31:47,068 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-03T23:31:47,072 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-03T23:31:47,074 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-03T23:31:47,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733268707074 (current time:1733268707074). 2024-12-03T23:31:47,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-03T23:31:47,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithSkipTmp VERSION not specified, setting to 2 2024-12-03T23:31:47,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-03T23:31:47,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2f05fad8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:31:47,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] client.ClusterIdFetcher(90): Going to request ed8014902436,43753,-1 for getting cluster id 2024-12-03T23:31:47,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T23:31:47,075 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '365bf15c-9cbe-4838-a767-ac7274dcdbe4' 2024-12-03T23:31:47,075 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T23:31:47,075 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "365bf15c-9cbe-4838-a767-ac7274dcdbe4" 2024-12-03T23:31:47,075 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1ce8aedc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:31:47,075 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [ed8014902436,43753,-1] 2024-12-03T23:31:47,076 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T23:31:47,076 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:31:47,076 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59380, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T23:31:47,077 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6ea2c54d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:31:47,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T23:31:47,077 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ed8014902436,37351,1733268370600, seqNum=-1] 2024-12-03T23:31:47,078 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T23:31:47,078 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56650, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T23:31:47,079 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753. 
2024-12-03T23:31:47,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-03T23:31:47,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:31:47,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:31:47,079 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-03T23:31:47,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@61076823, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:31:47,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] client.ClusterIdFetcher(90): Going to request ed8014902436,43753,-1 for getting cluster id 2024-12-03T23:31:47,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T23:31:47,080 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '365bf15c-9cbe-4838-a767-ac7274dcdbe4' 2024-12-03T23:31:47,080 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T23:31:47,080 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "365bf15c-9cbe-4838-a767-ac7274dcdbe4" 2024-12-03T23:31:47,080 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@464dc1fe, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:31:47,080 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [ed8014902436,43753,-1] 2024-12-03T23:31:47,080 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T23:31:47,081 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:31:47,081 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59398, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T23:31:47,081 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5e0328f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T23:31:47,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T23:31:47,082 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ed8014902436,37351,1733268370600, seqNum=-1] 2024-12-03T23:31:47,082 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T23:31:47,083 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56654, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T23:31:47,084 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSkipTmp', locateType=CURRENT is [region=hbase:acl,,1733268373530.94ece278d222af9bae70064026dc24ae., hostname=ed8014902436,39085,1733268370401, seqNum=32] 2024-12-03T23:31:47,084 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T23:31:47,085 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59034, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T23:31:47,086 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753. 
2024-12-03T23:31:47,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor256.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-03T23:31:47,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:31:47,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:31:47,086 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-03T23:31:47,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-12-03T23:31:47,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
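Note: the snapshot request traced here ({ ss=snaptb0-testExportFileSystemStateWithSkipTmp ... type=FLUSH ttl=0 }) is the server-side counterpart of a client calling Admin.snapshot, which blocks until the master-side SnapshotProcedure finishes (the repeated "Checking to see if procedure is done" polling above). A minimal sketch, assuming a reachable cluster with default configuration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class SnapshotExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // For an enabled table this produces a FLUSH-type snapshot, like the
          // one stored as pid=242 below; the call returns once the procedure
          // reports completion.
          admin.snapshot("snaptb0-testExportFileSystemStateWithSkipTmp",
              TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"));
        }
      }
    }

The export phase later in this log (ExportSnapshot with skipTmp=true) then copies the completed snapshot's manifest and referenced hfiles to the destination file system.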
2024-12-03T23:31:47,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] procedure2.ProcedureExecutor(1139): Stored pid=242, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-03T23:31:47,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 242 2024-12-03T23:31:47,088 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-03T23:31:47,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=242 2024-12-03T23:31:47,089 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-03T23:31:47,091 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-03T23:31:47,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742421_1597 (size=198) 2024-12-03T23:31:47,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742421_1597 (size=198) 2024-12-03T23:31:47,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742421_1597 (size=198) 2024-12-03T23:31:47,097 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-03T23:31:47,097 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=243, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 8c30395de41b86b7edd28d09950bac76}, {pid=244, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 48e5139804a581d94c2242cd268c9525}] 2024-12-03T23:31:47,098 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=244, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 48e5139804a581d94c2242cd268c9525 2024-12-03T23:31:47,099 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=243, ppid=242, 
state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 8c30395de41b86b7edd28d09950bac76 2024-12-03T23:31:47,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=242 2024-12-03T23:31:47,251 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37351 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=243 2024-12-03T23:31:47,251 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41121 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=244 2024-12-03T23:31:47,251 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1733268706057.48e5139804a581d94c2242cd268c9525. 2024-12-03T23:31:47,251 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1733268706057.8c30395de41b86b7edd28d09950bac76. 2024-12-03T23:31:47,251 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.HRegion(2902): Flushing 8c30395de41b86b7edd28d09950bac76 1/1 column families, dataSize=199 B heapSize=688 B 2024-12-03T23:31:47,251 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.HRegion(2902): Flushing 48e5139804a581d94c2242cd268c9525 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-12-03T23:31:47,296 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203b7983a93368542b4a24453e8c53a8ebb_8c30395de41b86b7edd28d09950bac76 is 71, key is 01a5d311b5760cb5f9375246f7f42016/cf:q/1733268707059/Put/seqid=0 2024-12-03T23:31:47,296 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241203c9295e1368094a5aab42cc411b101f1c_48e5139804a581d94c2242cd268c9525 is 71, key is 128a4619b01999fbc02f34d707cbba96/cf:q/1733268707061/Put/seqid=0 2024-12-03T23:31:47,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742422_1598 (size=5102) 2024-12-03T23:31:47,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742423_1599 (size=8172) 2024-12-03T23:31:47,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742422_1598 (size=5102) 2024-12-03T23:31:47,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742423_1599 (size=8172) 2024-12-03T23:31:47,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:37061 is added to blk_1073742422_1598 (size=5102) 2024-12-03T23:31:47,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742423_1599 (size=8172) 2024-12-03T23:31:47,304 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:31:47,304 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:31:47,308 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241203c9295e1368094a5aab42cc411b101f1c_48e5139804a581d94c2242cd268c9525 to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/c4ca4238a0b923820dcc509a6f75849b20241203c9295e1368094a5aab42cc411b101f1c_48e5139804a581d94c2242cd268c9525 2024-12-03T23:31:47,308 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203b7983a93368542b4a24453e8c53a8ebb_8c30395de41b86b7edd28d09950bac76 to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/d41d8cd98f00b204e9800998ecf8427e20241203b7983a93368542b4a24453e8c53a8ebb_8c30395de41b86b7edd28d09950bac76 2024-12-03T23:31:47,309 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithSkipTmp/48e5139804a581d94c2242cd268c9525/.tmp/cf/6c171eb6b37c4e07a21fb3f4584b3540, store: [table=testtb-testExportFileSystemStateWithSkipTmp family=cf region=48e5139804a581d94c2242cd268c9525] 2024-12-03T23:31:47,309 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithSkipTmp/8c30395de41b86b7edd28d09950bac76/.tmp/cf/dd69448b923a4b8f983e19863c6c81e9, store: [table=testtb-testExportFileSystemStateWithSkipTmp family=cf region=8c30395de41b86b7edd28d09950bac76] 2024-12-03T23:31:47,309 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithSkipTmp/8c30395de41b86b7edd28d09950bac76/.tmp/cf/dd69448b923a4b8f983e19863c6c81e9 is 220, key is 0b1f74fbfeb5b9e422c7a6aea1833ecbd/cf:q/1733268707059/Put/seqid=0 2024-12-03T23:31:47,309 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithSkipTmp/48e5139804a581d94c2242cd268c9525/.tmp/cf/6c171eb6b37c4e07a21fb3f4584b3540 is 220, key is 122e381b420672f884c09c6008396a386/cf:q/1733268707061/Put/seqid=0 2024-12-03T23:31:47,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742425_1601 (size=15527) 2024-12-03T23:31:47,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742424_1600 (size=5962) 2024-12-03T23:31:47,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742425_1601 (size=15527) 2024-12-03T23:31:47,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742425_1601 (size=15527) 2024-12-03T23:31:47,313 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithSkipTmp/48e5139804a581d94c2242cd268c9525/.tmp/cf/6c171eb6b37c4e07a21fb3f4584b3540 2024-12-03T23:31:47,313 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=199, hasBloomFilter=true, into tmp file hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithSkipTmp/8c30395de41b86b7edd28d09950bac76/.tmp/cf/dd69448b923a4b8f983e19863c6c81e9 2024-12-03T23:31:47,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742424_1600 (size=5962) 2024-12-03T23:31:47,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742424_1600 (size=5962) 2024-12-03T23:31:47,317 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithSkipTmp/8c30395de41b86b7edd28d09950bac76/.tmp/cf/dd69448b923a4b8f983e19863c6c81e9 as hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithSkipTmp/8c30395de41b86b7edd28d09950bac76/cf/dd69448b923a4b8f983e19863c6c81e9 2024-12-03T23:31:47,317 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithSkipTmp/48e5139804a581d94c2242cd268c9525/.tmp/cf/6c171eb6b37c4e07a21fb3f4584b3540 as hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithSkipTmp/48e5139804a581d94c2242cd268c9525/cf/6c171eb6b37c4e07a21fb3f4584b3540 2024-12-03T23:31:47,321 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithSkipTmp/8c30395de41b86b7edd28d09950bac76/cf/dd69448b923a4b8f983e19863c6c81e9, entries=3, sequenceid=6, filesize=5.8 K 2024-12-03T23:31:47,321 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithSkipTmp/48e5139804a581d94c2242cd268c9525/cf/6c171eb6b37c4e07a21fb3f4584b3540, entries=47, sequenceid=6, filesize=15.2 K 2024-12-03T23:31:47,322 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.HRegion(3140): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for 8c30395de41b86b7edd28d09950bac76 in 71ms, sequenceid=6, compaction requested=false 2024-12-03T23:31:47,322 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.HRegion(3140): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for 48e5139804a581d94c2242cd268c9525 in 71ms, sequenceid=6, compaction requested=false 2024-12-03T23:31:47,322 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithSkipTmp' 2024-12-03T23:31:47,322 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithSkipTmp' 2024-12-03T23:31:47,322 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.HRegion(2603): Flush status journal for 8c30395de41b86b7edd28d09950bac76: 2024-12-03T23:31:47,322 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.HRegion(2603): Flush status journal for 48e5139804a581d94c2242cd268c9525: 2024-12-03T23:31:47,322 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,1,1733268706057.48e5139804a581d94c2242cd268c9525. for snaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-03T23:31:47,322 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,,1733268706057.8c30395de41b86b7edd28d09950bac76. 
for snaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-03T23:31:47,323 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,1,1733268706057.48e5139804a581d94c2242cd268c9525.' region-info for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-03T23:31:47,323 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,,1733268706057.8c30395de41b86b7edd28d09950bac76.' region-info for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-03T23:31:47,323 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-03T23:31:47,323 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-03T23:31:47,323 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithSkipTmp/48e5139804a581d94c2242cd268c9525/cf/6c171eb6b37c4e07a21fb3f4584b3540] hfiles 2024-12-03T23:31:47,323 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithSkipTmp/8c30395de41b86b7edd28d09950bac76/cf/dd69448b923a4b8f983e19863c6c81e9] hfiles 2024-12-03T23:31:47,323 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithSkipTmp/8c30395de41b86b7edd28d09950bac76/cf/dd69448b923a4b8f983e19863c6c81e9 for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-03T23:31:47,323 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithSkipTmp/48e5139804a581d94c2242cd268c9525/cf/6c171eb6b37c4e07a21fb3f4584b3540 for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-03T23:31:47,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742426_1602 (size=121) 2024-12-03T23:31:47,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742426_1602 (size=121) 2024-12-03T23:31:47,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742427_1603 (size=121) 2024-12-03T23:31:47,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742427_1603 (size=121) 
2024-12-03T23:31:47,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742426_1602 (size=121) 2024-12-03T23:31:47,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742427_1603 (size=121) 2024-12-03T23:31:47,328 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1733268706057.8c30395de41b86b7edd28d09950bac76. 2024-12-03T23:31:47,328 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1733268706057.48e5139804a581d94c2242cd268c9525. 2024-12-03T23:31:47,328 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=243 2024-12-03T23:31:47,328 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/ed8014902436:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=244 2024-12-03T23:31:47,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] master.HMaster(4169): Remote procedure done, pid=243 2024-12-03T23:31:47,328 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSkipTmp on region 8c30395de41b86b7edd28d09950bac76 2024-12-03T23:31:47,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.HMaster(4169): Remote procedure done, pid=244 2024-12-03T23:31:47,328 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSkipTmp on region 48e5139804a581d94c2242cd268c9525 2024-12-03T23:31:47,329 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=243, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 8c30395de41b86b7edd28d09950bac76 2024-12-03T23:31:47,329 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=244, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 48e5139804a581d94c2242cd268c9525 2024-12-03T23:31:47,330 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=244, ppid=242, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 48e5139804a581d94c2242cd268c9525 in 232 msec 2024-12-03T23:31:47,331 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=243, resume processing ppid=242 2024-12-03T23:31:47,331 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=243, ppid=242, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 8c30395de41b86b7edd28d09950bac76 in 232 msec 2024-12-03T23:31:47,331 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-03T23:31:47,331 INFO [PEWorker-4 {}] 
procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-03T23:31:47,332 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 2024-12-03T23:31:47,332 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-03T23:31:47,332 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T23:31:47,333 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/c4ca4238a0b923820dcc509a6f75849b20241203c9295e1368094a5aab42cc411b101f1c_48e5139804a581d94c2242cd268c9525, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/d41d8cd98f00b204e9800998ecf8427e20241203b7983a93368542b4a24453e8c53a8ebb_8c30395de41b86b7edd28d09950bac76] hfiles 2024-12-03T23:31:47,333 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/c4ca4238a0b923820dcc509a6f75849b20241203c9295e1368094a5aab42cc411b101f1c_48e5139804a581d94c2242cd268c9525 2024-12-03T23:31:47,333 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/d41d8cd98f00b204e9800998ecf8427e20241203b7983a93368542b4a24453e8c53a8ebb_8c30395de41b86b7edd28d09950bac76 2024-12-03T23:31:47,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742428_1604 (size=305) 2024-12-03T23:31:47,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742428_1604 (size=305) 2024-12-03T23:31:47,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742428_1604 (size=305) 2024-12-03T23:31:47,338 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-03T23:31:47,338 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-03T23:31:47,339 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under 
directory:hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-03T23:31:47,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742429_1605 (size=1007) 2024-12-03T23:31:47,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742429_1605 (size=1007) 2024-12-03T23:31:47,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742429_1605 (size=1007) 2024-12-03T23:31:47,347 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-03T23:31:47,352 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-03T23:31:47,353 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-03T23:31:47,354 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-03T23:31:47,354 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 242 2024-12-03T23:31:47,355 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=242, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } in 267 msec 2024-12-03T23:31:47,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=242 2024-12-03T23:31:47,410 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-12-03T23:31:47,410 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268707410 2024-12-03T23:31:47,410 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): 
tgtFsUri=hdfs://localhost:37331, tgtDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268707410, rawTgtDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268707410, srcFsUri=hdfs://localhost:37331, srcDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b 2024-12-03T23:31:47,437 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:37331, inputRoot=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b 2024-12-03T23:31:47,437 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-919726820_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268707410, skipTmp=true, initialOutputSnapshotDir=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268707410/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-03T23:31:47,438 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-03T23:31:47,442 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268707410/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-03T23:31:47,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742430_1606 (size=1007) 2024-12-03T23:31:47,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742431_1607 (size=198) 2024-12-03T23:31:47,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742430_1606 (size=1007) 2024-12-03T23:31:47,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742430_1606 (size=1007) 2024-12-03T23:31:47,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742431_1607 (size=198) 2024-12-03T23:31:47,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742431_1607 (size=198) 2024-12-03T23:31:47,452 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:31:47,452 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:31:47,452 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:31:48,244 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/hadoop-13519897426842311353.jar 2024-12-03T23:31:48,244 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:31:48,244 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:31:48,307 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/hadoop-11171342277676419539.jar 2024-12-03T23:31:48,307 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:31:48,308 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:31:48,308 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:31:48,308 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:31:48,308 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:31:48,308 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-03T23:31:48,308 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-03T23:31:48,308 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-03T23:31:48,309 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-03T23:31:48,309 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-03T23:31:48,309 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-03T23:31:48,309 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-03T23:31:48,309 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-03T23:31:48,309 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-03T23:31:48,310 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-03T23:31:48,310 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-03T23:31:48,310 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-03T23:31:48,310 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-03T23:31:48,310 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-03T23:31:48,310 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-03T23:31:48,311 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-03T23:31:48,311 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-03T23:31:48,311 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-03T23:31:48,311 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-03T23:31:48,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742432_1608 (size=131440) 2024-12-03T23:31:48,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742432_1608 (size=131440) 2024-12-03T23:31:48,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742432_1608 (size=131440) 2024-12-03T23:31:48,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742433_1609 (size=4188619) 2024-12-03T23:31:48,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742433_1609 (size=4188619) 2024-12-03T23:31:48,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742433_1609 (size=4188619) 2024-12-03T23:31:48,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742434_1610 (size=1323991) 2024-12-03T23:31:48,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742434_1610 (size=1323991) 2024-12-03T23:31:48,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742434_1610 (size=1323991) 2024-12-03T23:31:48,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742435_1611 (size=903924) 2024-12-03T23:31:48,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742435_1611 (size=903924) 2024-12-03T23:31:48,398 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742435_1611 (size=903924) 2024-12-03T23:31:48,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742436_1612 (size=8360360) 2024-12-03T23:31:48,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742436_1612 (size=8360360) 2024-12-03T23:31:48,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742436_1612 (size=8360360) 2024-12-03T23:31:48,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742437_1613 (size=1877034) 2024-12-03T23:31:48,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742437_1613 (size=1877034) 2024-12-03T23:31:48,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742437_1613 (size=1877034) 2024-12-03T23:31:48,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742438_1614 (size=77835) 2024-12-03T23:31:48,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742438_1614 (size=77835) 2024-12-03T23:31:48,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742438_1614 (size=77835) 2024-12-03T23:31:48,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742439_1615 (size=6424741) 2024-12-03T23:31:48,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742439_1615 (size=6424741) 2024-12-03T23:31:48,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742439_1615 (size=6424741) 2024-12-03T23:31:48,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742440_1616 (size=30949) 2024-12-03T23:31:48,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742440_1616 (size=30949) 2024-12-03T23:31:48,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742440_1616 (size=30949) 2024-12-03T23:31:48,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742441_1617 (size=1597213) 2024-12-03T23:31:48,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742441_1617 (size=1597213) 2024-12-03T23:31:48,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742441_1617 (size=1597213) 2024-12-03T23:31:48,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742442_1618 (size=4695811) 2024-12-03T23:31:48,898 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742442_1618 (size=4695811) 2024-12-03T23:31:48,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742442_1618 (size=4695811) 2024-12-03T23:31:48,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742443_1619 (size=232957) 2024-12-03T23:31:48,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742443_1619 (size=232957) 2024-12-03T23:31:48,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742443_1619 (size=232957) 2024-12-03T23:31:48,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742444_1620 (size=127628) 2024-12-03T23:31:48,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742444_1620 (size=127628) 2024-12-03T23:31:48,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742444_1620 (size=127628) 2024-12-03T23:31:48,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742445_1621 (size=20406) 2024-12-03T23:31:48,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742445_1621 (size=20406) 2024-12-03T23:31:48,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742445_1621 (size=20406) 2024-12-03T23:31:48,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742446_1622 (size=5175431) 2024-12-03T23:31:48,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742446_1622 (size=5175431) 2024-12-03T23:31:48,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742446_1622 (size=5175431) 2024-12-03T23:31:48,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742447_1623 (size=217634) 2024-12-03T23:31:48,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742447_1623 (size=217634) 2024-12-03T23:31:48,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742447_1623 (size=217634) 2024-12-03T23:31:48,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742448_1624 (size=443172) 2024-12-03T23:31:48,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742448_1624 (size=443172) 2024-12-03T23:31:48,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742448_1624 (size=443172) 2024-12-03T23:31:48,961 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742449_1625 (size=1832290) 2024-12-03T23:31:48,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742449_1625 (size=1832290) 2024-12-03T23:31:48,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742449_1625 (size=1832290) 2024-12-03T23:31:48,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742450_1626 (size=322274) 2024-12-03T23:31:48,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742450_1626 (size=322274) 2024-12-03T23:31:48,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742450_1626 (size=322274) 2024-12-03T23:31:48,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742451_1627 (size=503880) 2024-12-03T23:31:48,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742451_1627 (size=503880) 2024-12-03T23:31:48,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742451_1627 (size=503880) 2024-12-03T23:31:48,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742452_1628 (size=29229) 2024-12-03T23:31:48,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742452_1628 (size=29229) 2024-12-03T23:31:48,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742452_1628 (size=29229) 2024-12-03T23:31:48,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742453_1629 (size=24096) 2024-12-03T23:31:48,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742453_1629 (size=24096) 2024-12-03T23:31:48,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742453_1629 (size=24096) 2024-12-03T23:31:49,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742454_1630 (size=111872) 2024-12-03T23:31:49,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742454_1630 (size=111872) 2024-12-03T23:31:49,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742454_1630 (size=111872) 2024-12-03T23:31:49,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742455_1631 (size=45609) 2024-12-03T23:31:49,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742455_1631 (size=45609) 2024-12-03T23:31:49,011 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742455_1631 (size=45609) 2024-12-03T23:31:49,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742456_1632 (size=136454) 2024-12-03T23:31:49,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742456_1632 (size=136454) 2024-12-03T23:31:49,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742456_1632 (size=136454) 2024-12-03T23:31:49,023 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-03T23:31:49,025 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportFileSystemStateWithSkipTmp' hfile list 2024-12-03T23:31:49,028 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=15.2 K 2024-12-03T23:31:49,028 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=8.0 K 2024-12-03T23:31:49,029 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=5.8 K 2024-12-03T23:31:49,029 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=5.0 K 2024-12-03T23:31:49,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742457_1633 (size=1079) 2024-12-03T23:31:49,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742457_1633 (size=1079) 2024-12-03T23:31:49,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742457_1633 (size=1079) 2024-12-03T23:31:49,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742458_1634 (size=35) 2024-12-03T23:31:49,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742458_1634 (size=35) 2024-12-03T23:31:49,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742458_1634 (size=35) 2024-12-03T23:31:49,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742459_1635 (size=304170) 2024-12-03T23:31:49,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742459_1635 (size=304170) 2024-12-03T23:31:49,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742459_1635 (size=304170) 2024-12-03T23:31:49,252 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_3/usercache/jenkins/appcache/application_1733268377251_0010/container_1733268377251_0010_01_000003/launch_container.sh] 2024-12-03T23:31:49,252 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete 
returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_3/usercache/jenkins/appcache/application_1733268377251_0010/container_1733268377251_0010_01_000003/container_tokens] 2024-12-03T23:31:49,252 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_3/usercache/jenkins/appcache/application_1733268377251_0010/container_1733268377251_0010_01_000003/sysfs] 2024-12-03T23:31:49,465 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region c6477990e979586f139ab47f9dab2f20, had cached 0 bytes from a total of 5890 2024-12-03T23:31:49,465 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region b50632fd0085469e770e68c6467cbdf0, had cached 0 bytes from a total of 14661 2024-12-03T23:31:49,917 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp 2024-12-03T23:31:49,918 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp Metrics about Tables on a single HBase RegionServer 2024-12-03T23:31:49,918 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum 2024-12-03T23:31:50,286 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-03T23:31:50,286 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-03T23:31:50,288 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0010_000001 (auth:SIMPLE) from 127.0.0.1:45866 2024-12-03T23:31:50,297 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_3/usercache/jenkins/appcache/application_1733268377251_0010/container_1733268377251_0010_01_000001/launch_container.sh] 2024-12-03T23:31:50,297 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_3/usercache/jenkins/appcache/application_1733268377251_0010/container_1733268377251_0010_01_000001/container_tokens] 2024-12-03T23:31:50,297 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_3/usercache/jenkins/appcache/application_1733268377251_0010/container_1733268377251_0010_01_000001/sysfs] 2024-12-03T23:31:51,163 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0011_000001 (auth:SIMPLE) from 127.0.0.1:50236 2024-12-03T23:31:51,211 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-03T23:31:55,557 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0011_000001 (auth:SIMPLE) from 127.0.0.1:35238 2024-12-03T23:31:55,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742460_1636 (size=349892) 2024-12-03T23:31:55,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742460_1636 (size=349892) 2024-12-03T23:31:55,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742460_1636 (size=349892) 2024-12-03T23:31:57,789 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0011_000001 (auth:SIMPLE) from 127.0.0.1:43572 2024-12-03T23:31:57,789 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0011_000001 (auth:SIMPLE) from 127.0.0.1:36076 2024-12-03T23:31:58,649 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0011_000001 (auth:SIMPLE) from 127.0.0.1:43578 2024-12-03T23:31:58,652 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0011_000001 (auth:SIMPLE) from 127.0.0.1:36082 2024-12-03T23:32:00,871 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 94ece278d222af9bae70064026dc24ae, had cached 0 bytes from a total of 5791 2024-12-03T23:32:01,289 WARN 
[NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733268377251_0011_01_000006 while processing FINISH_CONTAINERS event 2024-12-03T23:32:02,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742461_1637 (size=15527) 2024-12-03T23:32:02,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742461_1637 (size=15527) 2024-12-03T23:32:02,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742461_1637 (size=15527) 2024-12-03T23:32:02,817 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_3/usercache/jenkins/appcache/application_1733268377251_0011/container_1733268377251_0011_01_000002/launch_container.sh] 2024-12-03T23:32:02,817 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_3/usercache/jenkins/appcache/application_1733268377251_0011/container_1733268377251_0011_01_000002/container_tokens] 2024-12-03T23:32:02,818 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_3/usercache/jenkins/appcache/application_1733268377251_0011/container_1733268377251_0011_01_000002/sysfs] 2024-12-03T23:32:04,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742463_1639 (size=5962) 2024-12-03T23:32:04,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742463_1639 (size=5962) 2024-12-03T23:32:04,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742463_1639 (size=5962) 2024-12-03T23:32:04,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742464_1640 (size=5102) 2024-12-03T23:32:04,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742464_1640 (size=5102) 2024-12-03T23:32:04,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742464_1640 (size=5102) 2024-12-03T23:32:04,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742465_1641 (size=8172) 2024-12-03T23:32:04,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742465_1641 (size=8172) 2024-12-03T23:32:04,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742465_1641 (size=8172) 
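
Context for the entries above: TableMapReduceUtil has resolved the dependency jars, ExportSnapshot has loaded the snapshot's hfile list and sized the map splits (export split=0..3), and the copy is now running as a MapReduce job in the MiniMRCluster YARN containers, with skipTmp=true so output goes straight into the destination's .hbase-snapshot/ directory. The following is only a minimal sketch, assuming a reachable cluster configuration, of driving the same export tool programmatically; it is not the test's own invocation, the destination path is a placeholder, and the option names (-snapshot, -copy-to, -skip-tmp) follow the tool's conventional command-line usage and should be checked against the HBase version in use.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Copy an existing snapshot to another filesystem root, writing directly
        // into .hbase-snapshot/ on the destination (no .tmp staging), which is
        // what skipTmp=true means in the log above. Paths are placeholders.
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportFileSystemStateWithSkipTmp",
            "-copy-to", "hdfs://namenode:8020/export-test/export-dest",
            "-skip-tmp"
        });
        System.exit(rc);
      }
    }
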
2024-12-03T23:32:04,623 WARN [ContainersLauncher #6 {}] launcher.ContainerCleanup(157): Exception when trying to cleanup container container_1733268377251_0011_01_000005: java.io.FileNotFoundException: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_2/nmPrivate/application_1733268377251_0011/container_1733268377251_0011_01_000005/container_1733268377251_0011_01_000005.pid (No such file or directory) at java.base/java.io.FileInputStream.open0(Native Method) at java.base/java.io.FileInputStream.open(FileInputStream.java:216) at java.base/java.io.FileInputStream.<init>(FileInputStream.java:157) at org.apache.hadoop.yarn.server.nodemanager.util.ProcessIdFileReader.getProcessId(ProcessIdFileReader.java:59) at org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.getContainerPid(ContainerLaunch.java:1050) at org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerCleanup.run(ContainerCleanup.java:119) at java.base/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T23:32:04,638 WARN [ContainersLauncher #6 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_3/usercache/jenkins/appcache/application_1733268377251_0011/container_1733268377251_0011_01_000005/launch_container.sh] 2024-12-03T23:32:04,638 WARN [ContainersLauncher #6 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_3/usercache/jenkins/appcache/application_1733268377251_0011/container_1733268377251_0011_01_000005/container_tokens] 2024-12-03T23:32:04,638 WARN [ContainersLauncher #6 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_3/usercache/jenkins/appcache/application_1733268377251_0011/container_1733268377251_0011_01_000005/sysfs] 2024-12-03T23:32:04,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742462_1638 (size=31803) 2024-12-03T23:32:04,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742462_1638 (size=31803) 2024-12-03T23:32:04,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742462_1638 (size=31803) 2024-12-03T23:32:04,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742466_1642 (size=477) 2024-12-03T23:32:04,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:37061 is added to blk_1073742466_1642 (size=477) 2024-12-03T23:32:04,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742466_1642 (size=477) 2024-12-03T23:32:04,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742467_1643 (size=31803) 2024-12-03T23:32:04,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742467_1643 (size=31803) 2024-12-03T23:32:04,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742467_1643 (size=31803) 2024-12-03T23:32:04,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742468_1644 (size=349892) 2024-12-03T23:32:04,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742468_1644 (size=349892) 2024-12-03T23:32:04,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742468_1644 (size=349892) 2024-12-03T23:32:04,825 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733268377251_0011_000001 (auth:SIMPLE) from 127.0.0.1:57808 2024-12-03T23:32:06,228 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-03T23:32:06,228 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-12-03T23:32:06,242 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-03T23:32:06,242 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-03T23:32:06,242 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-03T23:32:06,243 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-919726820_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp at hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-03T23:32:06,243 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/.snapshotinfo 2024-12-03T23:32:06,243 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/data.manifest 2024-12-03T23:32:06,243 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-919726820_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268707410/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp at 
hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268707410/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-03T23:32:06,243 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268707410/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/.snapshotinfo 2024-12-03T23:32:06,243 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/export-test/export-1733268707410/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/data.manifest 2024-12-03T23:32:06,250 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithSkipTmp 2024-12-03T23:32:06,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure2.ProcedureExecutor(1139): Stored pid=245, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-03T23:32:06,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=245 2024-12-03T23:32:06,255 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733268726255"}]},"ts":"1733268726255"} 2024-12-03T23:32:06,257 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=DISABLING in hbase:meta 2024-12-03T23:32:06,257 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithSkipTmp to state=DISABLING 2024-12-03T23:32:06,258 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=246, ppid=245, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSkipTmp}] 2024-12-03T23:32:06,261 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=247, ppid=246, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=8c30395de41b86b7edd28d09950bac76, UNASSIGN}, {pid=248, ppid=246, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=48e5139804a581d94c2242cd268c9525, UNASSIGN}] 2024-12-03T23:32:06,263 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=248, ppid=246, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=48e5139804a581d94c2242cd268c9525, UNASSIGN 2024-12-03T23:32:06,263 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=247, ppid=246, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=8c30395de41b86b7edd28d09950bac76, UNASSIGN 2024-12-03T23:32:06,264 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=247 updating hbase:meta row=8c30395de41b86b7edd28d09950bac76, regionState=CLOSING, 
regionLocation=ed8014902436,37351,1733268370600 2024-12-03T23:32:06,264 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=248 updating hbase:meta row=48e5139804a581d94c2242cd268c9525, regionState=CLOSING, regionLocation=ed8014902436,41121,1733268370525 2024-12-03T23:32:06,266 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=247, ppid=246, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=8c30395de41b86b7edd28d09950bac76, UNASSIGN because future has completed 2024-12-03T23:32:06,266 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-03T23:32:06,266 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=249, ppid=247, state=RUNNABLE, hasLock=false; CloseRegionProcedure 8c30395de41b86b7edd28d09950bac76, server=ed8014902436,37351,1733268370600}] 2024-12-03T23:32:06,267 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=248, ppid=246, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=48e5139804a581d94c2242cd268c9525, UNASSIGN because future has completed 2024-12-03T23:32:06,267 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-03T23:32:06,267 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=250, ppid=248, state=RUNNABLE, hasLock=false; CloseRegionProcedure 48e5139804a581d94c2242cd268c9525, server=ed8014902436,41121,1733268370525}] 2024-12-03T23:32:06,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=245 2024-12-03T23:32:06,419 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] handler.UnassignRegionHandler(122): Close 8c30395de41b86b7edd28d09950bac76 2024-12-03T23:32:06,419 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-03T23:32:06,419 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] regionserver.HRegion(1722): Closing 8c30395de41b86b7edd28d09950bac76, disabling compactions & flushes 2024-12-03T23:32:06,419 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,,1733268706057.8c30395de41b86b7edd28d09950bac76. 2024-12-03T23:32:06,419 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,,1733268706057.8c30395de41b86b7edd28d09950bac76. 2024-12-03T23:32:06,419 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,,1733268706057.8c30395de41b86b7edd28d09950bac76. 
after waiting 0 ms 2024-12-03T23:32:06,420 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,,1733268706057.8c30395de41b86b7edd28d09950bac76. 2024-12-03T23:32:06,420 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] handler.UnassignRegionHandler(122): Close 48e5139804a581d94c2242cd268c9525 2024-12-03T23:32:06,420 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-03T23:32:06,420 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] regionserver.HRegion(1722): Closing 48e5139804a581d94c2242cd268c9525, disabling compactions & flushes 2024-12-03T23:32:06,420 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,1,1733268706057.48e5139804a581d94c2242cd268c9525. 2024-12-03T23:32:06,420 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1733268706057.48e5139804a581d94c2242cd268c9525. 2024-12-03T23:32:06,420 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1733268706057.48e5139804a581d94c2242cd268c9525. after waiting 0 ms 2024-12-03T23:32:06,420 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,1,1733268706057.48e5139804a581d94c2242cd268c9525. 2024-12-03T23:32:06,438 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithSkipTmp/8c30395de41b86b7edd28d09950bac76/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-03T23:32:06,441 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-03T23:32:06,440 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithSkipTmp/48e5139804a581d94c2242cd268c9525/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-03T23:32:06,441 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,,1733268706057.8c30395de41b86b7edd28d09950bac76. 
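
The region closes recorded above (pids 249 and 250) are the server-side effect of the DisableTableProcedure stored earlier as pid=245: each region stops taking writes, records a recovered.edits seqid marker, and is reported CLOSED back to the master before the table state in hbase:meta flips to DISABLED. A minimal sketch of the client call that triggers this path, assuming a reachable cluster configuration (the table name is taken from the log, everything else is boilerplate):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DisableTableSketch {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          if (!admin.isTableDisabled(table)) {
            // Blocks until the master-side DisableTableProcedure finishes:
            // regions are unassigned and the table state becomes DISABLED.
            admin.disableTable(table);
          }
        }
      }
    }
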
2024-12-03T23:32:06,441 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] regionserver.HRegion(1676): Region close journal for 8c30395de41b86b7edd28d09950bac76: Waiting for close lock at 1733268726419Running coprocessor pre-close hooks at 1733268726419Disabling compacts and flushes for region at 1733268726419Disabling writes for close at 1733268726419Writing region close event to WAL at 1733268726420 (+1 ms)Running coprocessor post-close hooks at 1733268726440 (+20 ms)Closed at 1733268726441 (+1 ms) 2024-12-03T23:32:06,441 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-03T23:32:06,441 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,1,1733268706057.48e5139804a581d94c2242cd268c9525. 2024-12-03T23:32:06,441 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] regionserver.HRegion(1676): Region close journal for 48e5139804a581d94c2242cd268c9525: Waiting for close lock at 1733268726420Running coprocessor pre-close hooks at 1733268726420Disabling compacts and flushes for region at 1733268726420Disabling writes for close at 1733268726420Writing region close event to WAL at 1733268726431 (+11 ms)Running coprocessor post-close hooks at 1733268726441 (+10 ms)Closed at 1733268726441 2024-12-03T23:32:06,442 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] handler.UnassignRegionHandler(157): Closed 8c30395de41b86b7edd28d09950bac76 2024-12-03T23:32:06,443 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=247 updating hbase:meta row=8c30395de41b86b7edd28d09950bac76, regionState=CLOSED 2024-12-03T23:32:06,444 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] handler.UnassignRegionHandler(157): Closed 48e5139804a581d94c2242cd268c9525 2024-12-03T23:32:06,444 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=248 updating hbase:meta row=48e5139804a581d94c2242cd268c9525, regionState=CLOSED 2024-12-03T23:32:06,445 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=249, ppid=247, state=RUNNABLE, hasLock=false; CloseRegionProcedure 8c30395de41b86b7edd28d09950bac76, server=ed8014902436,37351,1733268370600 because future has completed 2024-12-03T23:32:06,446 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=250, ppid=248, state=RUNNABLE, hasLock=false; CloseRegionProcedure 48e5139804a581d94c2242cd268c9525, server=ed8014902436,41121,1733268370525 because future has completed 2024-12-03T23:32:06,447 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=249, resume processing ppid=247 2024-12-03T23:32:06,447 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=249, ppid=247, state=SUCCESS, hasLock=false; CloseRegionProcedure 8c30395de41b86b7edd28d09950bac76, server=ed8014902436,37351,1733268370600 in 180 msec 2024-12-03T23:32:06,448 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=250, resume processing ppid=248 2024-12-03T23:32:06,448 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=250, ppid=248, 
state=SUCCESS, hasLock=false; CloseRegionProcedure 48e5139804a581d94c2242cd268c9525, server=ed8014902436,41121,1733268370525 in 179 msec 2024-12-03T23:32:06,448 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=247, ppid=246, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=8c30395de41b86b7edd28d09950bac76, UNASSIGN in 186 msec 2024-12-03T23:32:06,449 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=248, resume processing ppid=246 2024-12-03T23:32:06,449 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=248, ppid=246, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=48e5139804a581d94c2242cd268c9525, UNASSIGN in 187 msec 2024-12-03T23:32:06,451 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=246, resume processing ppid=245 2024-12-03T23:32:06,451 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=246, ppid=245, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 191 msec 2024-12-03T23:32:06,452 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733268726452"}]},"ts":"1733268726452"} 2024-12-03T23:32:06,453 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=DISABLED in hbase:meta 2024-12-03T23:32:06,453 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithSkipTmp to state=DISABLED 2024-12-03T23:32:06,454 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=245, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 203 msec 2024-12-03T23:32:06,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=245 2024-12-03T23:32:06,569 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-12-03T23:32:06,570 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithSkipTmp 2024-12-03T23:32:06,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] procedure2.ProcedureExecutor(1139): Stored pid=251, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-03T23:32:06,572 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=251, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-03T23:32:06,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithSkipTmp 2024-12-03T23:32:06,573 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=251, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 
2024-12-03T23:32:06,576 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39085 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithSkipTmp 2024-12-03T23:32:06,580 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithSkipTmp/8c30395de41b86b7edd28d09950bac76 2024-12-03T23:32:06,581 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithSkipTmp/48e5139804a581d94c2242cd268c9525 2024-12-03T23:32:06,582 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithSkipTmp/48e5139804a581d94c2242cd268c9525/cf, FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithSkipTmp/48e5139804a581d94c2242cd268c9525/recovered.edits] 2024-12-03T23:32:06,586 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithSkipTmp/8c30395de41b86b7edd28d09950bac76/cf, FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithSkipTmp/8c30395de41b86b7edd28d09950bac76/recovered.edits] 2024-12-03T23:32:06,589 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithSkipTmp/48e5139804a581d94c2242cd268c9525/cf/6c171eb6b37c4e07a21fb3f4584b3540 to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/48e5139804a581d94c2242cd268c9525/cf/6c171eb6b37c4e07a21fb3f4584b3540 2024-12-03T23:32:06,592 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithSkipTmp/8c30395de41b86b7edd28d09950bac76/cf/dd69448b923a4b8f983e19863c6c81e9 to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/8c30395de41b86b7edd28d09950bac76/cf/dd69448b923a4b8f983e19863c6c81e9 2024-12-03T23:32:06,594 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithSkipTmp/48e5139804a581d94c2242cd268c9525/recovered.edits/9.seqid to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/48e5139804a581d94c2242cd268c9525/recovered.edits/9.seqid 2024-12-03T23:32:06,595 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(610): Deleted 
hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithSkipTmp/48e5139804a581d94c2242cd268c9525 2024-12-03T23:32:06,595 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithSkipTmp/8c30395de41b86b7edd28d09950bac76/recovered.edits/9.seqid to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/8c30395de41b86b7edd28d09950bac76/recovered.edits/9.seqid 2024-12-03T23:32:06,597 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testtb-testExportFileSystemStateWithSkipTmp/8c30395de41b86b7edd28d09950bac76 2024-12-03T23:32:06,597 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithSkipTmp regions 2024-12-03T23:32:06,597 DEBUG [PEWorker-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a 2024-12-03T23:32:06,598 DEBUG [PEWorker-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf] 2024-12-03T23:32:06,615 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/c4ca4238a0b923820dcc509a6f75849b20241203c9295e1368094a5aab42cc411b101f1c_48e5139804a581d94c2242cd268c9525 to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/c4ca4238a0b923820dcc509a6f75849b20241203c9295e1368094a5aab42cc411b101f1c_48e5139804a581d94c2242cd268c9525 2024-12-03T23:32:06,617 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/d41d8cd98f00b204e9800998ecf8427e20241203b7983a93368542b4a24453e8c53a8ebb_8c30395de41b86b7edd28d09950bac76 to hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/d41d8cd98f00b204e9800998ecf8427e20241203b7983a93368542b4a24453e8c53a8ebb_8c30395de41b86b7edd28d09950bac76 2024-12-03T23:32:06,618 DEBUG [PEWorker-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a 2024-12-03T23:32:06,622 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=251, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-03T23:32:06,628 WARN 
[PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithSkipTmp from hbase:meta 2024-12-03T23:32:06,629 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-03T23:32:06,629 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-03T23:32:06,629 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-03T23:32:06,629 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-03T23:32:06,631 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-03T23:32:06,631 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-03T23:32:06,631 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-03T23:32:06,631 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-03T23:32:06,632 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithSkipTmp' descriptor. 2024-12-03T23:32:06,637 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=251, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-03T23:32:06,637 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithSkipTmp' from region states. 
2024-12-03T23:32:06,637 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp,,1733268706057.8c30395de41b86b7edd28d09950bac76.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733268726637"}]},"ts":"9223372036854775807"} 2024-12-03T23:32:06,638 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp,1,1733268706057.48e5139804a581d94c2242cd268c9525.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733268726637"}]},"ts":"9223372036854775807"} 2024-12-03T23:32:06,639 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-03T23:32:06,639 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-03T23:32:06,639 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:32:06,639 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-03T23:32:06,639 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:32:06,639 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-03T23:32:06,640 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:32:06,640 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-03T23:32:06,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=251 2024-12-03T23:32:06,641 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-03T23:32:06,641 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot 
\x00 \x01 \x02 \x03 \x04 2024-12-03T23:32:06,641 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-03T23:32:06,642 INFO [PEWorker-1 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-03T23:32:06,642 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 8c30395de41b86b7edd28d09950bac76, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1733268706057.8c30395de41b86b7edd28d09950bac76.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 48e5139804a581d94c2242cd268c9525, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1733268706057.48e5139804a581d94c2242cd268c9525.', STARTKEY => '1', ENDKEY => ''}] 2024-12-03T23:32:06,642 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-03T23:32:06,642 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithSkipTmp' as deleted. 2024-12-03T23:32:06,642 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733268726642"}]},"ts":"9223372036854775807"} 2024-12-03T23:32:06,644 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithSkipTmp state from META 2024-12-03T23:32:06,646 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=251, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-03T23:32:06,648 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=251, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 76 msec 2024-12-03T23:32:06,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=251 2024-12-03T23:32:06,750 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithSkipTmp 2024-12-03T23:32:06,750 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-12-03T23:32:06,756 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithSkipTmp" type: DISABLED 2024-12-03T23:32:06,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-03T23:32:06,759 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithSkipTmp" type: DISABLED 2024-12-03T23:32:06,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithSkipTmp 
2024-12-03T23:32:06,785 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithSkipTmp Thread=831 (was 825) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-919726820_22 at /127.0.0.1:58298 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-27 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 165142) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #19 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #20 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (790252428) connection to localhost/127.0.0.1:36171 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HFileArchiver-26 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_207538943_1 at /127.0.0.1:46862 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) 
java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-919726820_22 at /127.0.0.1:59896 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36171 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_207538943_1 at /127.0.0.1:59874 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-9935 
java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-919726820_22 at /127.0.0.1:46894 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=823 (was 826), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=783 (was 753) - SystemLoadAverage LEAK? -, ProcessCount=24 (was 24), AvailableMemoryMB=1364 (was 1545) 2024-12-03T23:32:06,786 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=831 is superior to 500 2024-12-03T23:32:06,786 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2377): Stopping mini mapreduce cluster... 
2024-12-03T23:32:06,793 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@700abb2{node,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-03T23:32:06,796 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5f5c072{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T23:32:06,796 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T23:32:06,797 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2d33fe8e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-03T23:32:06,797 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7a3730f8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/hadoop.log.dir/,STOPPED} 2024-12-03T23:32:06,809 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1733268377251_0011_01_000001 is : 143 2024-12-03T23:32:06,824 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_2/usercache/jenkins/appcache/application_1733268377251_0011/container_1733268377251_0011_01_000001/launch_container.sh] 2024-12-03T23:32:06,824 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_2/usercache/jenkins/appcache/application_1733268377251_0011/container_1733268377251_0011_01_000001/container_tokens] 2024-12-03T23:32:06,824 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_2/usercache/jenkins/appcache/application_1733268377251_0011/container_1733268377251_0011_01_000001/sysfs] 2024-12-03T23:32:08,450 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-03T23:32:09,660 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_2/usercache/jenkins/appcache/application_1733268377251_0011/container_1733268377251_0011_01_000004/launch_container.sh] 2024-12-03T23:32:09,660 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_2/usercache/jenkins/appcache/application_1733268377251_0011/container_1733268377251_0011_01_000004/container_tokens] 2024-12-03T23:32:09,660 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-1_2/usercache/jenkins/appcache/application_1733268377251_0011/container_1733268377251_0011_01_000004/sysfs] 2024-12-03T23:32:09,859 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_3/usercache/jenkins/appcache/application_1733268377251_0011/container_1733268377251_0011_01_000003/launch_container.sh] 2024-12-03T23:32:09,859 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_3/usercache/jenkins/appcache/application_1733268377251_0011/container_1733268377251_0011_01_000003/container_tokens] 2024-12-03T23:32:09,859 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_377774742/yarn-6959272986/MiniMRCluster_377774742-localDir-nm-0_3/usercache/jenkins/appcache/application_1733268377251_0011/container_1733268377251_0011_01_000003/sysfs] 2024-12-03T23:32:09,917 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp 2024-12-03T23:32:11,963 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-03T23:32:12,380 WARN [regionserver/ed8014902436:0.Chore.1 {}] hbase.ExecutorStatusChore(69): RS_COMPACTED_FILES_DISCHARGER's size info, queued: 4, running: 1 2024-12-03T23:32:13,634 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
2024-12-03T23:32:13,634 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=87.09 KB heapSize=137.91 KB 2024-12-03T23:32:13,658 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/hbase/meta/1588230740/.tmp/info/ebd9b7efb5f84385b74b5a690bafb7c5 is 173, key is testExportExpiredSnapshot,1,1733268619120.b50632fd0085469e770e68c6467cbdf0./info:regioninfo/1733268619477/Put/seqid=0 2024-12-03T23:32:13,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742469_1645 (size=16203) 2024-12-03T23:32:13,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742469_1645 (size=16203) 2024-12-03T23:32:13,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742469_1645 (size=16203) 2024-12-03T23:32:13,663 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74.09 KB at sequenceid=240 (bloomFilter=true), to=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/hbase/meta/1588230740/.tmp/info/ebd9b7efb5f84385b74b5a690bafb7c5 2024-12-03T23:32:13,679 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/hbase/meta/1588230740/.tmp/ns/f784ea3442b646bca0f5089a7b4d6872 is 124, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1733268597414.0f2277f66421dd9546af949a5d3e0a94./ns:/1733268617034/DeleteFamily/seqid=0 2024-12-03T23:32:13,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742470_1646 (size=8378) 2024-12-03T23:32:13,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742470_1646 (size=8378) 2024-12-03T23:32:13,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742470_1646 (size=8378) 2024-12-03T23:32:13,684 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.79 KB at sequenceid=240 (bloomFilter=true), to=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/hbase/meta/1588230740/.tmp/ns/f784ea3442b646bca0f5089a7b4d6872 2024-12-03T23:32:13,701 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/hbase/meta/1588230740/.tmp/rep_barrier/4ed9a598a52647d8977f0d3282471cda is 133, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1733268597414.0f2277f66421dd9546af949a5d3e0a94./rep_barrier:/1733268617034/DeleteFamily/seqid=0 2024-12-03T23:32:13,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742471_1647 (size=8717) 2024-12-03T23:32:13,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742471_1647 (size=8717) 2024-12-03T23:32:13,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:37061 is added to blk_1073742471_1647 (size=8717) 2024-12-03T23:32:13,705 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.95 KB at sequenceid=240 (bloomFilter=true), to=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/hbase/meta/1588230740/.tmp/rep_barrier/4ed9a598a52647d8977f0d3282471cda 2024-12-03T23:32:13,723 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/hbase/meta/1588230740/.tmp/table/1e9cf56520d24ee9aac9b5c3c69a23e0 is 127, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1733268597414.0f2277f66421dd9546af949a5d3e0a94./table:/1733268617034/DeleteFamily/seqid=0 2024-12-03T23:32:13,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742472_1648 (size=9531) 2024-12-03T23:32:13,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742472_1648 (size=9531) 2024-12-03T23:32:13,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742472_1648 (size=9531) 2024-12-03T23:32:13,728 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.27 KB at sequenceid=240 (bloomFilter=true), to=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/hbase/meta/1588230740/.tmp/table/1e9cf56520d24ee9aac9b5c3c69a23e0 2024-12-03T23:32:13,732 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/hbase/meta/1588230740/.tmp/info/ebd9b7efb5f84385b74b5a690bafb7c5 as hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/hbase/meta/1588230740/info/ebd9b7efb5f84385b74b5a690bafb7c5 2024-12-03T23:32:13,736 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/hbase/meta/1588230740/info/ebd9b7efb5f84385b74b5a690bafb7c5, entries=89, sequenceid=240, filesize=15.8 K 2024-12-03T23:32:13,737 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/hbase/meta/1588230740/.tmp/ns/f784ea3442b646bca0f5089a7b4d6872 as hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/hbase/meta/1588230740/ns/f784ea3442b646bca0f5089a7b4d6872 2024-12-03T23:32:13,741 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/hbase/meta/1588230740/ns/f784ea3442b646bca0f5089a7b4d6872, entries=28, sequenceid=240, filesize=8.2 K 2024-12-03T23:32:13,741 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/hbase/meta/1588230740/.tmp/rep_barrier/4ed9a598a52647d8977f0d3282471cda as hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/hbase/meta/1588230740/rep_barrier/4ed9a598a52647d8977f0d3282471cda 2024-12-03T23:32:13,745 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/hbase/meta/1588230740/rep_barrier/4ed9a598a52647d8977f0d3282471cda, entries=26, sequenceid=240, filesize=8.5 K 2024-12-03T23:32:13,746 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/hbase/meta/1588230740/.tmp/table/1e9cf56520d24ee9aac9b5c3c69a23e0 as hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/hbase/meta/1588230740/table/1e9cf56520d24ee9aac9b5c3c69a23e0 2024-12-03T23:32:13,749 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/hbase/meta/1588230740/table/1e9cf56520d24ee9aac9b5c3c69a23e0, entries=43, sequenceid=240, filesize=9.3 K 2024-12-03T23:32:13,750 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~87.09 KB/89184, heapSize ~137.85 KB/141160, currentSize=0 B/0 for 1588230740 in 116ms, sequenceid=240, compaction requested=false 2024-12-03T23:32:13,750 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-12-03T23:32:15,939 DEBUG [master/ed8014902436:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 94ece278d222af9bae70064026dc24ae changed from -1.0 to 0.0, refreshing cache 2024-12-03T23:32:23,816 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5c6df60a{node,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-03T23:32:23,816 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@afb0898{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T23:32:23,816 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T23:32:23,817 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2727aaa7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-03T23:32:23,817 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5c129cb7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/hadoop.log.dir/,STOPPED} 2024-12-03T23:32:34,465 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region c6477990e979586f139ab47f9dab2f20, had cached 0 bytes from a total of 5890 2024-12-03T23:32:34,466 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region b50632fd0085469e770e68c6467cbdf0, had cached 0 bytes from a total of 14661 2024-12-03T23:32:38,450 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-03T23:32:40,830 ERROR [Thread[Thread-402,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-12-03T23:32:40,830 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5b303091{cluster,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/cluster} 2024-12-03T23:32:40,831 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2cc0d6f8{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T23:32:40,831 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T23:32:40,831 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6d2a5e21{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-03T23:32:40,831 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@534bb42e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/hadoop.log.dir/,STOPPED} 2024-12-03T23:32:40,833 WARN [ApplicationMaster Launcher {}] amlauncher.ApplicationMasterLauncher$LauncherThread(122): org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher$LauncherThread interrupted. Returning. 2024-12-03T23:32:40,838 ERROR [SchedulerEventDispatcher:Event Processor {}] event.EventDispatcher$EventProcessor(72): Returning, interrupted : java.lang.InterruptedException 2024-12-03T23:32:40,838 ERROR [ResourceManager Event Processor Monitor {}] resourcemanager.ResourceManager$SchedulerEventDispatcher$EventProcessorMonitor(1193): Returning, interrupted : java.lang.InterruptedException: sleep interrupted 2024-12-03T23:32:40,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741830_1006 (size=1171017) 2024-12-03T23:32:40,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741830_1006 (size=1171017) 2024-12-03T23:32:40,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741830_1006 (size=1171017) 2024-12-03T23:32:40,842 ERROR [Thread[Thread-423,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-12-03T23:32:40,844 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6ef8b464{jobhistory,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/jobhistory} 2024-12-03T23:32:40,845 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1404e5df{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T23:32:40,845 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T23:32:40,845 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped 
o.e.j.s.ServletContextHandler@230c0589{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-03T23:32:40,845 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4dc5d503{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/hadoop.log.dir/,STOPPED} 2024-12-03T23:32:40,846 ERROR [Thread[Thread-384,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-12-03T23:32:40,846 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2380): Mini mapreduce cluster stopped 2024-12-03T23:32:40,846 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-03T23:32:40,847 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-03T23:32:40,847 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T23:32:40,847 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:32:40,847 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:32:40,847 INFO [Registry-endpoints-refresh-end-points {}] 
client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-03T23:32:40,847 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-03T23:32:40,847 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1683478754, stopped=false 2024-12-03T23:32:40,847 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-03T23:32:40,847 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-12-03T23:32:40,847 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=ed8014902436,43753,1733268369485 2024-12-03T23:32:40,897 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-03T23:32:40,897 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-03T23:32:40,897 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-03T23:32:40,897 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-03T23:32:40,897 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T23:32:40,897 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T23:32:40,897 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T23:32:40,897 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T23:32:40,897 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-03T23:32:40,899 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T23:32:40,899 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-03T23:32:40,899 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T23:32:40,900 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T23:32:40,900 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:32:40,900 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T23:32:40,901 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'ed8014902436,39085,1733268370401' ***** 2024-12-03T23:32:40,901 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T23:32:40,901 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-03T23:32:40,901 INFO 
[Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-03T23:32:40,901 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'ed8014902436,41121,1733268370525' ***** 2024-12-03T23:32:40,902 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-03T23:32:40,902 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-03T23:32:40,902 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'ed8014902436,37351,1733268370600' ***** 2024-12-03T23:32:40,902 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-03T23:32:40,902 INFO [RS:0;ed8014902436:39085 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-03T23:32:40,902 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-03T23:32:40,902 INFO [RS:1;ed8014902436:41121 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-03T23:32:40,902 INFO [RS:0;ed8014902436:39085 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-03T23:32:40,902 INFO [RS:2;ed8014902436:37351 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-03T23:32:40,902 INFO [RS:1;ed8014902436:41121 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-03T23:32:40,902 INFO [RS:2;ed8014902436:37351 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-03T23:32:40,903 INFO [RS:1;ed8014902436:41121 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-03T23:32:40,903 INFO [RS:0;ed8014902436:39085 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-03T23:32:40,903 INFO [RS:2;ed8014902436:37351 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-03T23:32:40,903 INFO [RS:1;ed8014902436:41121 {}] regionserver.HRegionServer(3091): Received CLOSE for b50632fd0085469e770e68c6467cbdf0 2024-12-03T23:32:40,903 INFO [RS:0;ed8014902436:39085 {}] regionserver.HRegionServer(3091): Received CLOSE for 94ece278d222af9bae70064026dc24ae 2024-12-03T23:32:40,903 INFO [RS:2;ed8014902436:37351 {}] regionserver.HRegionServer(3091): Received CLOSE for c6477990e979586f139ab47f9dab2f20 2024-12-03T23:32:40,903 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-03T23:32:40,903 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-03T23:32:40,903 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-03T23:32:40,903 INFO [RS:1;ed8014902436:41121 {}] regionserver.HRegionServer(959): stopping server ed8014902436,41121,1733268370525 2024-12-03T23:32:40,903 INFO [RS:1;ed8014902436:41121 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-03T23:32:40,904 INFO [RS:1;ed8014902436:41121 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;ed8014902436:41121. 
2024-12-03T23:32:40,904 INFO [RS:0;ed8014902436:39085 {}] regionserver.HRegionServer(959): stopping server ed8014902436,39085,1733268370401 2024-12-03T23:32:40,904 INFO [RS:0;ed8014902436:39085 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-03T23:32:40,904 DEBUG [RS:1;ed8014902436:41121 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T23:32:40,904 INFO [RS:0;ed8014902436:39085 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;ed8014902436:39085. 
2024-12-03T23:32:40,904 DEBUG [RS:1;ed8014902436:41121 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:32:40,904 DEBUG [RS:0;ed8014902436:39085 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T23:32:40,904 DEBUG [RS:0;ed8014902436:39085 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:32:40,904 INFO [RS:1;ed8014902436:41121 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-03T23:32:40,904 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 94ece278d222af9bae70064026dc24ae, disabling compactions & flushes 2024-12-03T23:32:40,904 INFO [RS:0;ed8014902436:39085 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-03T23:32:40,904 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing c6477990e979586f139ab47f9dab2f20, disabling compactions & flushes 2024-12-03T23:32:40,904 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region hbase:acl,,1733268373530.94ece278d222af9bae70064026dc24ae. 2024-12-03T23:32:40,904 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,,1733268619120.c6477990e979586f139ab47f9dab2f20. 2024-12-03T23:32:40,904 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing b50632fd0085469e770e68c6467cbdf0, disabling compactions & flushes 2024-12-03T23:32:40,904 DEBUG [RS:1;ed8014902436:41121 {}] regionserver.HRegionServer(1325): Online Regions={b50632fd0085469e770e68c6467cbdf0=testExportExpiredSnapshot,1,1733268619120.b50632fd0085469e770e68c6467cbdf0.} 2024-12-03T23:32:40,904 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:acl,,1733268373530.94ece278d222af9bae70064026dc24ae. 
2024-12-03T23:32:40,904 DEBUG [RS:0;ed8014902436:39085 {}] regionserver.HRegionServer(1325): Online Regions={94ece278d222af9bae70064026dc24ae=hbase:acl,,1733268373530.94ece278d222af9bae70064026dc24ae.} 2024-12-03T23:32:40,904 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,,1733268619120.c6477990e979586f139ab47f9dab2f20. 2024-12-03T23:32:40,904 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,1,1733268619120.b50632fd0085469e770e68c6467cbdf0. 2024-12-03T23:32:40,904 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on hbase:acl,,1733268373530.94ece278d222af9bae70064026dc24ae. after waiting 0 ms 2024-12-03T23:32:40,904 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,,1733268619120.c6477990e979586f139ab47f9dab2f20. after waiting 0 ms 2024-12-03T23:32:40,904 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,1,1733268619120.b50632fd0085469e770e68c6467cbdf0. 2024-12-03T23:32:40,904 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region hbase:acl,,1733268373530.94ece278d222af9bae70064026dc24ae. 2024-12-03T23:32:40,904 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,,1733268619120.c6477990e979586f139ab47f9dab2f20. 2024-12-03T23:32:40,904 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,1,1733268619120.b50632fd0085469e770e68c6467cbdf0. after waiting 0 ms 2024-12-03T23:32:40,904 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,1,1733268619120.b50632fd0085469e770e68c6467cbdf0. 2024-12-03T23:32:40,904 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 94ece278d222af9bae70064026dc24ae 1/1 column families, dataSize=190 B heapSize=672 B 2024-12-03T23:32:40,904 INFO [RS:2;ed8014902436:37351 {}] regionserver.HRegionServer(959): stopping server ed8014902436,37351,1733268370600 2024-12-03T23:32:40,904 INFO [RS:2;ed8014902436:37351 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-03T23:32:40,904 DEBUG [RS:0;ed8014902436:39085 {}] regionserver.HRegionServer(1351): Waiting on 94ece278d222af9bae70064026dc24ae 2024-12-03T23:32:40,904 DEBUG [RS:1;ed8014902436:41121 {}] regionserver.HRegionServer(1351): Waiting on b50632fd0085469e770e68c6467cbdf0 2024-12-03T23:32:40,904 INFO [RS:2;ed8014902436:37351 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;ed8014902436:37351. 
2024-12-03T23:32:40,904 DEBUG [RS:2;ed8014902436:37351 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T23:32:40,905 DEBUG [RS:2;ed8014902436:37351 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:32:40,905 INFO [RS:2;ed8014902436:37351 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-03T23:32:40,905 INFO [RS:2;ed8014902436:37351 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-03T23:32:40,905 INFO [RS:2;ed8014902436:37351 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-03T23:32:40,905 INFO [RS:2;ed8014902436:37351 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-03T23:32:40,906 INFO [RS:2;ed8014902436:37351 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-03T23:32:40,906 DEBUG [RS:2;ed8014902436:37351 {}] regionserver.HRegionServer(1325): Online Regions={c6477990e979586f139ab47f9dab2f20=testExportExpiredSnapshot,,1733268619120.c6477990e979586f139ab47f9dab2f20., 1588230740=hbase:meta,,1.1588230740} 2024-12-03T23:32:40,906 DEBUG [RS:2;ed8014902436:37351 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, c6477990e979586f139ab47f9dab2f20 2024-12-03T23:32:40,907 DEBUG [RS_CLOSE_META-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-03T23:32:40,907 INFO [RS_CLOSE_META-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-03T23:32:40,907 DEBUG [RS_CLOSE_META-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-03T23:32:40,907 DEBUG [RS_CLOSE_META-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-03T23:32:40,907 DEBUG [RS_CLOSE_META-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-03T23:32:40,913 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportExpiredSnapshot/c6477990e979586f139ab47f9dab2f20/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-03T23:32:40,913 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/default/testExportExpiredSnapshot/b50632fd0085469e770e68c6467cbdf0/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-03T23:32:40,913 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-03T23:32:40,913 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-03T23:32:40,913 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,1,1733268619120.b50632fd0085469e770e68c6467cbdf0. 2024-12-03T23:32:40,913 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,,1733268619120.c6477990e979586f139ab47f9dab2f20. 
2024-12-03T23:32:40,913 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for b50632fd0085469e770e68c6467cbdf0: Waiting for close lock at 1733268760903Running coprocessor pre-close hooks at 1733268760904 (+1 ms)Disabling compacts and flushes for region at 1733268760904Disabling writes for close at 1733268760904Writing region close event to WAL at 1733268760907 (+3 ms)Running coprocessor post-close hooks at 1733268760913 (+6 ms)Closed at 1733268760913 2024-12-03T23:32:40,913 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for c6477990e979586f139ab47f9dab2f20: Waiting for close lock at 1733268760904Running coprocessor pre-close hooks at 1733268760904Disabling compacts and flushes for region at 1733268760904Disabling writes for close at 1733268760904Writing region close event to WAL at 1733268760907 (+3 ms)Running coprocessor post-close hooks at 1733268760913 (+6 ms)Closed at 1733268760913 2024-12-03T23:32:40,913 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testExportExpiredSnapshot,1,1733268619120.b50632fd0085469e770e68c6467cbdf0. 2024-12-03T23:32:40,913 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testExportExpiredSnapshot,,1733268619120.c6477990e979586f139ab47f9dab2f20. 2024-12-03T23:32:40,915 DEBUG [RS_CLOSE_META-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/hbase/meta/1588230740/recovered.edits/243.seqid, newMaxSeqId=243, maxSeqId=1 2024-12-03T23:32:40,915 DEBUG [RS_CLOSE_META-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-03T23:32:40,915 DEBUG [RS_CLOSE_META-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-03T23:32:40,915 INFO [RS_CLOSE_META-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-03T23:32:40,915 DEBUG [RS_CLOSE_META-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733268760906Running coprocessor pre-close hooks at 1733268760906Disabling compacts and flushes for region at 1733268760906Disabling writes for close at 1733268760907 (+1 ms)Writing region close event to WAL at 1733268760912 (+5 ms)Running coprocessor post-close hooks at 1733268760915 (+3 ms)Closed at 1733268760915 2024-12-03T23:32:40,915 DEBUG [RS_CLOSE_META-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-03T23:32:40,918 INFO [regionserver/ed8014902436:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T23:32:40,921 INFO [regionserver/ed8014902436:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T23:32:40,923 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/hbase/acl/94ece278d222af9bae70064026dc24ae/.tmp/l/baa127f94f9f4b7ebbb938c62c84cb33 is 68, key is testtb-testExportFileSystemStateWithSkipTmp/l:/1733268726573/DeleteFamily/seqid=0 2024-12-03T23:32:40,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742473_1649 (size=5142) 2024-12-03T23:32:40,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742473_1649 (size=5142) 2024-12-03T23:32:40,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742473_1649 (size=5142) 2024-12-03T23:32:40,927 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=190 B at sequenceid=37 (bloomFilter=false), to=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/hbase/acl/94ece278d222af9bae70064026dc24ae/.tmp/l/baa127f94f9f4b7ebbb938c62c84cb33 2024-12-03T23:32:40,931 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for baa127f94f9f4b7ebbb938c62c84cb33 2024-12-03T23:32:40,932 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/hbase/acl/94ece278d222af9bae70064026dc24ae/.tmp/l/baa127f94f9f4b7ebbb938c62c84cb33 as hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/hbase/acl/94ece278d222af9bae70064026dc24ae/l/baa127f94f9f4b7ebbb938c62c84cb33 2024-12-03T23:32:40,935 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for baa127f94f9f4b7ebbb938c62c84cb33 2024-12-03T23:32:40,935 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/hbase/acl/94ece278d222af9bae70064026dc24ae/l/baa127f94f9f4b7ebbb938c62c84cb33, entries=2, sequenceid=37, filesize=5.0 K 2024-12-03T23:32:40,936 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~190 B/190, heapSize ~656 B/656, currentSize=0 B/0 for 94ece278d222af9bae70064026dc24ae in 32ms, sequenceid=37, compaction requested=false 2024-12-03T23:32:40,939 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/data/hbase/acl/94ece278d222af9bae70064026dc24ae/recovered.edits/40.seqid, newMaxSeqId=40, maxSeqId=31 2024-12-03T23:32:40,940 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-03T23:32:40,940 INFO [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed 
hbase:acl,,1733268373530.94ece278d222af9bae70064026dc24ae. 2024-12-03T23:32:40,940 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 94ece278d222af9bae70064026dc24ae: Waiting for close lock at 1733268760904Running coprocessor pre-close hooks at 1733268760904Disabling compacts and flushes for region at 1733268760904Disabling writes for close at 1733268760904Obtaining lock to block concurrent updates at 1733268760904Preparing flush snapshotting stores in 94ece278d222af9bae70064026dc24ae at 1733268760904Finished memstore snapshotting hbase:acl,,1733268373530.94ece278d222af9bae70064026dc24ae., syncing WAL and waiting on mvcc, flushsize=dataSize=190, getHeapSize=656, getOffHeapSize=0, getCellsCount=3 at 1733268760905 (+1 ms)Flushing stores of hbase:acl,,1733268373530.94ece278d222af9bae70064026dc24ae. at 1733268760905Flushing 94ece278d222af9bae70064026dc24ae/l: creating writer at 1733268760905Flushing 94ece278d222af9bae70064026dc24ae/l: appending metadata at 1733268760922 (+17 ms)Flushing 94ece278d222af9bae70064026dc24ae/l: closing flushed file at 1733268760922Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7247d66d: reopening flushed file at 1733268760931 (+9 ms)Finished flush of dataSize ~190 B/190, heapSize ~656 B/656, currentSize=0 B/0 for 94ece278d222af9bae70064026dc24ae in 32ms, sequenceid=37, compaction requested=false at 1733268760936 (+5 ms)Writing region close event to WAL at 1733268760937 (+1 ms)Running coprocessor post-close hooks at 1733268760940 (+3 ms)Closed at 1733268760940 2024-12-03T23:32:40,940 DEBUG [RS_CLOSE_REGION-regionserver/ed8014902436:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:acl,,1733268373530.94ece278d222af9bae70064026dc24ae. 2024-12-03T23:32:40,986 INFO [regionserver/ed8014902436:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T23:32:41,105 INFO [RS:0;ed8014902436:39085 {}] regionserver.HRegionServer(976): stopping server ed8014902436,39085,1733268370401; all regions closed. 2024-12-03T23:32:41,105 INFO [RS:1;ed8014902436:41121 {}] regionserver.HRegionServer(976): stopping server ed8014902436,41121,1733268370525; all regions closed. 2024-12-03T23:32:41,107 INFO [RS:2;ed8014902436:37351 {}] regionserver.HRegionServer(976): stopping server ed8014902436,37351,1733268370600; all regions closed. 
2024-12-03T23:32:41,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741836_1012 (size=101602) 2024-12-03T23:32:41,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741836_1012 (size=101602) 2024-12-03T23:32:41,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741835_1011 (size=13270) 2024-12-03T23:32:41,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741836_1012 (size=101602) 2024-12-03T23:32:41,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741835_1011 (size=13270) 2024-12-03T23:32:41,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741834_1010 (size=14472) 2024-12-03T23:32:41,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741835_1011 (size=13270) 2024-12-03T23:32:41,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741834_1010 (size=14472) 2024-12-03T23:32:41,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741834_1010 (size=14472) 2024-12-03T23:32:41,119 DEBUG [RS:2;ed8014902436:37351 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/oldWALs 2024-12-03T23:32:41,119 DEBUG [RS:1;ed8014902436:41121 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/oldWALs 2024-12-03T23:32:41,119 INFO [RS:1;ed8014902436:41121 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL ed8014902436%2C41121%2C1733268370525:(num 1733268372551) 2024-12-03T23:32:41,119 INFO [RS:2;ed8014902436:37351 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL ed8014902436%2C37351%2C1733268370600.meta:.meta(num 1733268373120) 2024-12-03T23:32:41,119 DEBUG [RS:1;ed8014902436:41121 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:32:41,119 INFO [RS:1;ed8014902436:41121 {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T23:32:41,119 INFO [RS:1;ed8014902436:41121 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-03T23:32:41,119 INFO [RS:1;ed8014902436:41121 {}] hbase.ChoreService(370): Chore service for: regionserver/ed8014902436:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-03T23:32:41,119 INFO [RS:1;ed8014902436:41121 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-03T23:32:41,120 INFO [RS:1;ed8014902436:41121 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-03T23:32:41,120 INFO [regionserver/ed8014902436:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-03T23:32:41,120 INFO [RS:1;ed8014902436:41121 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-03T23:32:41,120 INFO [RS:1;ed8014902436:41121 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-03T23:32:41,120 DEBUG [RS:0;ed8014902436:39085 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/oldWALs 2024-12-03T23:32:41,120 INFO [RS:0;ed8014902436:39085 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL ed8014902436%2C39085%2C1733268370401:(num 1733268372552) 2024-12-03T23:32:41,120 DEBUG [RS:0;ed8014902436:39085 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:32:41,120 INFO [RS:0;ed8014902436:39085 {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T23:32:41,120 INFO [RS:0;ed8014902436:39085 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-03T23:32:41,120 INFO [RS:1;ed8014902436:41121 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41121 2024-12-03T23:32:41,120 INFO [RS:0;ed8014902436:39085 {}] hbase.ChoreService(370): Chore service for: regionserver/ed8014902436:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-03T23:32:41,121 INFO [RS:0;ed8014902436:39085 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-03T23:32:41,121 INFO [RS:0;ed8014902436:39085 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-03T23:32:41,121 INFO [regionserver/ed8014902436:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-03T23:32:41,121 INFO [RS:0;ed8014902436:39085 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-03T23:32:41,121 INFO [RS:0;ed8014902436:39085 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-03T23:32:41,121 INFO [RS:0;ed8014902436:39085 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39085 2024-12-03T23:32:41,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073741833_1009 (size=16404) 2024-12-03T23:32:41,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741833_1009 (size=16404) 2024-12-03T23:32:41,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073741833_1009 (size=16404) 2024-12-03T23:32:41,124 DEBUG [RS:2;ed8014902436:37351 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/oldWALs 2024-12-03T23:32:41,124 INFO [RS:2;ed8014902436:37351 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL ed8014902436%2C37351%2C1733268370600:(num 1733268372555) 2024-12-03T23:32:41,124 DEBUG [RS:2;ed8014902436:37351 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T23:32:41,124 INFO [RS:2;ed8014902436:37351 {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T23:32:41,124 INFO [RS:2;ed8014902436:37351 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-03T23:32:41,124 INFO [RS:2;ed8014902436:37351 {}] hbase.ChoreService(370): Chore service for: regionserver/ed8014902436:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-03T23:32:41,124 INFO [RS:2;ed8014902436:37351 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-03T23:32:41,124 INFO [regionserver/ed8014902436:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-03T23:32:41,124 INFO [RS:2;ed8014902436:37351 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37351 2024-12-03T23:32:41,163 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/ed8014902436,39085,1733268370401 2024-12-03T23:32:41,163 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T23:32:41,163 INFO [RS:0;ed8014902436:39085 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-03T23:32:41,176 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/ed8014902436,41121,1733268370525 2024-12-03T23:32:41,176 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/ed8014902436,37351,1733268370600 2024-12-03T23:32:41,176 INFO [RS:1;ed8014902436:41121 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-03T23:32:41,176 INFO [RS:2;ed8014902436:37351 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-03T23:32:41,185 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [ed8014902436,41121,1733268370525] 2024-12-03T23:32:41,201 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/ed8014902436,41121,1733268370525 already deleted, retry=false 2024-12-03T23:32:41,201 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; ed8014902436,41121,1733268370525 expired; onlineServers=2 2024-12-03T23:32:41,201 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [ed8014902436,39085,1733268370401] 2024-12-03T23:32:41,209 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/ed8014902436,39085,1733268370401 already deleted, retry=false 2024-12-03T23:32:41,209 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; ed8014902436,39085,1733268370401 expired; onlineServers=1 2024-12-03T23:32:41,209 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [ed8014902436,37351,1733268370600] 2024-12-03T23:32:41,218 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/ed8014902436,37351,1733268370600 already deleted, retry=false 2024-12-03T23:32:41,218 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; ed8014902436,37351,1733268370600 expired; onlineServers=0 2024-12-03T23:32:41,218 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'ed8014902436,43753,1733268369485' ***** 2024-12-03T23:32:41,218 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-03T23:32:41,218 INFO [M:0;ed8014902436:43753 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-03T23:32:41,218 INFO [M:0;ed8014902436:43753 {}] 
hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-03T23:32:41,218 DEBUG [M:0;ed8014902436:43753 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-03T23:32:41,218 DEBUG [M:0;ed8014902436:43753 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-03T23:32:41,218 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-12-03T23:32:41,218 DEBUG [master/ed8014902436:0:becomeActiveMaster-HFileCleaner.large.0-1733268372081 {}] cleaner.HFileCleaner(306): Exit Thread[master/ed8014902436:0:becomeActiveMaster-HFileCleaner.large.0-1733268372081,5,FailOnTimeoutGroup] 2024-12-03T23:32:41,218 DEBUG [master/ed8014902436:0:becomeActiveMaster-HFileCleaner.small.0-1733268372099 {}] cleaner.HFileCleaner(306): Exit Thread[master/ed8014902436:0:becomeActiveMaster-HFileCleaner.small.0-1733268372099,5,FailOnTimeoutGroup] 2024-12-03T23:32:41,219 INFO [M:0;ed8014902436:43753 {}] hbase.ChoreService(370): Chore service for: master/ed8014902436:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-03T23:32:41,219 INFO [M:0;ed8014902436:43753 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-03T23:32:41,219 DEBUG [M:0;ed8014902436:43753 {}] master.HMaster(1795): Stopping service threads 2024-12-03T23:32:41,219 INFO [M:0;ed8014902436:43753 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-03T23:32:41,219 INFO [M:0;ed8014902436:43753 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-03T23:32:41,219 INFO [M:0;ed8014902436:43753 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-03T23:32:41,220 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-12-03T23:32:41,230 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-03T23:32:41,230 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T23:32:41,230 DEBUG [M:0;ed8014902436:43753 {}] zookeeper.ZKUtil(347): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-03T23:32:41,230 WARN [M:0;ed8014902436:43753 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-03T23:32:41,231 INFO [M:0;ed8014902436:43753 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/.lastflushedseqids 2024-12-03T23:32:41,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39281 is added to blk_1073742474_1650 (size=325) 2024-12-03T23:32:41,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37061 is added to blk_1073742474_1650 (size=325) 2024-12-03T23:32:41,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742474_1650 (size=325) 2024-12-03T23:32:41,242 INFO [M:0;ed8014902436:43753 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-03T23:32:41,242 INFO [M:0;ed8014902436:43753 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-03T23:32:41,242 DEBUG [M:0;ed8014902436:43753 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-03T23:32:41,253 INFO [M:0;ed8014902436:43753 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T23:32:41,253 DEBUG [M:0;ed8014902436:43753 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T23:32:41,253 DEBUG [M:0;ed8014902436:43753 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-03T23:32:41,253 DEBUG [M:0;ed8014902436:43753 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-03T23:32:41,253 INFO [M:0;ed8014902436:43753 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=996.40 KB heapSize=1.17 MB 2024-12-03T23:32:41,254 ERROR [AsyncFSWAL-0-hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/MasterData-prefix:ed8014902436,43753,1733268369485 {}] server.NIOServerCnxnFactory(85): Thread Thread[AsyncFSWAL-0-hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/MasterData-prefix:ed8014902436,43753,1733268369485,5,FailOnTimeoutGroup] died java.lang.NullPointerException: Cannot invoke "org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf.readableBytes()" because "this.buf" is null at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.buffered(FanOutOneBlockAsyncDFSOutput.java:414) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.append(AsyncProtobufLogWriter.java:134) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:181) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:100) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.appendEntry(AbstractFSWAL.java:1333) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.appendAndSync(AbstractFSWAL.java:1724) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.consume(AbstractFSWAL.java:1832) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T23:32:41,285 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T23:32:41,285 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39085-0x1019ecdff490001, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T23:32:41,286 INFO [RS:0;ed8014902436:39085 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-03T23:32:41,286 INFO [RS:0;ed8014902436:39085 {}] regionserver.HRegionServer(1031): Exiting; stopping=ed8014902436,39085,1733268370401; zookeeper connection closed. 
2024-12-03T23:32:41,286 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2485518b {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2485518b 2024-12-03T23:32:41,293 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T23:32:41,293 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T23:32:41,293 INFO [RS:2;ed8014902436:37351 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-03T23:32:41,293 INFO [RS:1;ed8014902436:41121 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-03T23:32:41,293 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37351-0x1019ecdff490003, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T23:32:41,293 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41121-0x1019ecdff490002, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T23:32:41,293 INFO [RS:2;ed8014902436:37351 {}] regionserver.HRegionServer(1031): Exiting; stopping=ed8014902436,37351,1733268370600; zookeeper connection closed. 2024-12-03T23:32:41,293 INFO [RS:1;ed8014902436:41121 {}] regionserver.HRegionServer(1031): Exiting; stopping=ed8014902436,41121,1733268370525; zookeeper connection closed. 2024-12-03T23:32:41,294 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@28d1ca36 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@28d1ca36 2024-12-03T23:32:41,294 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6e7432a9 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6e7432a9 2024-12-03T23:32:41,294 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-03T23:32:46,432 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-03T23:32:49,918 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-03T23:32:49,918 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-03T23:32:49,918 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-03T23:32:49,918 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot 2024-12-03T23:32:49,918 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_acl 
2024-12-03T23:32:49,918 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-03T23:32:49,918 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-12-03T23:32:49,919 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-03T23:32:55,422 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-03T23:33:08,451 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-03T23:33:38,451 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;ed8014902436:43753 235 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 9 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 77 Waited count: 21 Waiting on java.lang.ref.ReferenceQueue$Lock@47709661 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 22 Waited count: 23 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 20 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 3 Waited count: 27 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@41cb614c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 4585 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 46 Waiting on java.util.concurrent.CountDownLatch$Sync@59e49cdc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 13105 Waited count: 13803 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) 
java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 15 Waited count: 16 Waiting on java.lang.ref.ReferenceQueue$Lock@474feda6 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@5d943502 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@210db1f9): State: TIMED_WAITING Blocked count: 0 Waited count: 912 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 92 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1788055457-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f804442b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1788055457-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f804442b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1788055457-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f804442b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1788055457-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f804442b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1788055457-41-acceptor-0@2913c9f2-ServerConnector@322d032a{HTTP/1.1, (http/1.1)}{localhost:42843}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1788055457-42): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1788055457-43): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1788055457-44): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-48adee89-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 35 Waited count: 3493 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46246080 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 37331): State: TIMED_WAITING Blocked count: 1 Waited count: 47 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 92 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@37f20e91): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 152 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@308b96ef): State: TIMED_WAITING Blocked count: 0 Waited count: 92 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 152 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 44853 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1518 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4703d8aa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 37331): State: TIMED_WAITING Blocked count: 130 Waited count: 2696 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 37331): State: TIMED_WAITING Blocked count: 125 Waited count: 2685 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 37331): State: TIMED_WAITING Blocked count: 116 Waited count: 2680 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 37331): State: TIMED_WAITING Blocked count: 111 Waited count: 2691 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 37331): State: TIMED_WAITING Blocked count: 113 Waited count: 2705 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 
(pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@2f8bc2): State: TIMED_WAITING Blocked count: 0 Waited count: 228 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@641be552): State: TIMED_WAITING Blocked count: 0 Waited count: 92 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@54c54640): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@401c0ac3): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(1787404574)): State: TIMED_WAITING Blocked count: 0 Waited count: 17 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp1646715981-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f804442b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp1646715981-88-acceptor-0@137ae5e2-ServerConnector@33a58c38{HTTP/1.1, (http/1.1)}{localhost:35253}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp1646715981-89): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp1646715981-90): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-61e651cd-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@70908ed9): State: TIMED_WAITING Blocked count: 0 Waited count: 910 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 44719): State: TIMED_WAITING Blocked count: 1 Waited count: 47 Stack: 
java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 91 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 1 Waited count: 310 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7a701ddc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-1874256318-172.17.0.2-1733268364664 heartbeating to localhost/127.0.0.1:37331): State: TIMED_WAITING Blocked count: 1518 Waited count: 1622 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4944581b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 44719): State: TIMED_WAITING Blocked count: 0 Waited count: 465 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 44719): State: TIMED_WAITING Blocked count: 0 Waited count: 470 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 44719): State: TIMED_WAITING Blocked count: 0 Waited count: 460 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 44719): State: TIMED_WAITING Blocked count: 0 Waited count: 456 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 44719): State: TIMED_WAITING Blocked count: 0 Waited count: 463 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp965936876-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f804442b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp965936876-122-acceptor-0@4a0d9492-ServerConnector@439d9c33{HTTP/1.1, (http/1.1)}{localhost:44045}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp965936876-123): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 113 (IPC Client (790252428) connection to localhost/127.0.0.1:37331 from jenkins): State: TIMED_WAITING Blocked count: 1425 Waited count: 1426 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 124 (qtp965936876-124): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 114 (IPC Parameter Sending Thread for localhost/127.0.0.1:37331): State: TIMED_WAITING Blocked count: 0 Waited count: 2243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-40c2a88f-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@24b300e4): State: TIMED_WAITING Blocked count: 3 Waited count: 909 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 45585): State: TIMED_WAITING Blocked count: 1 Waited count: 47 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 91 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 2 Waited count: 347 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e1bbf3b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-1874256318-172.17.0.2-1733268364664 heartbeating to localhost/127.0.0.1:37331): State: TIMED_WAITING Blocked count: 1493 Waited count: 1623 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@7661a068): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 45585): State: TIMED_WAITING Blocked count: 0 Waited count: 469 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 45585): State: TIMED_WAITING Blocked count: 0 Waited count: 480 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 45585): State: TIMED_WAITING Blocked count: 0 Waited count: 476 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 45585): State: TIMED_WAITING Blocked count: 0 Waited count: 473 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 45585): State: TIMED_WAITING Blocked count: 0 Waited count: 475 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp142999172-153): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f804442b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp142999172-154-acceptor-0@20ffac82-ServerConnector@6d761894{HTTP/1.1, (http/1.1)}{localhost:37007}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp142999172-155): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp142999172-156): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-65ea3f7-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@5f59958a): State: TIMED_WAITING Blocked count: 0 Waited count: 909 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 40667): State: TIMED_WAITING Blocked count: 1 Waited count: 47 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 91 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 0 Waited count: 343 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5167f793 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-1874256318-172.17.0.2-1733268364664 heartbeating to localhost/127.0.0.1:37331): State: TIMED_WAITING Blocked count: 1455 Waited count: 1616 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@1195476b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) 
app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 40667): State: TIMED_WAITING Blocked count: 0 Waited count: 495 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 40667): State: TIMED_WAITING Blocked count: 0 Waited count: 479 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 40667): State: TIMED_WAITING Blocked count: 0 Waited count: 487 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 40667): State: TIMED_WAITING Blocked count: 0 Waited count: 473 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 40667): State: TIMED_WAITING Blocked count: 0 Waited count: 455 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data1)): State: TIMED_WAITING Blocked count: 32 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data3)): State: TIMED_WAITING Blocked count: 30 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data2)): State: TIMED_WAITING Blocked count: 29 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 188 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data4)): State: TIMED_WAITING Blocked count: 26 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 198 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data4/current/BP-1874256318-172.17.0.2-1733268364664): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data1/current/BP-1874256318-172.17.0.2-1733268364664): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data3/current/BP-1874256318-172.17.0.2-1733268364664): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 201 
(refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data2/current/BP-1874256318-172.17.0.2-1733268364664): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 213 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 214 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 217 (java.util.concurrent.ThreadPoolExecutor$Worker@3ee2e622[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 220 
(java.util.concurrent.ThreadPoolExecutor$Worker@8f83449[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 221 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data5)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 222 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 227 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data6/current/BP-1874256318-172.17.0.2-1733268364664): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 228 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data5/current/BP-1874256318-172.17.0.2-1733268364664): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 232 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 236 (java.util.concurrent.ThreadPoolExecutor$Worker@4d6f6e47[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 237 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 16 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 239 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 240 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 241 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:54632): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) 
app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 238 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 47 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 242 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 228 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 243 (SyncThread:0): State: WAITING Blocked count: 10 Waited count: 370 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@231963f2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 244 (ProcessThread(sid:0 cport:54632):): State: WAITING Blocked count: 1 Waited count: 462 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@57a67d3c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 245 (RequestThrottler): State: WAITING Blocked count: 1 Waited count: 499 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5eadd9a4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 246 (NIOWorkerThread-1): State: WAITING Blocked count: 4 Waited count: 97 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 248 (LeaseRenewer:jenkins@localhost:37331): State: TIMED_WAITING Blocked count: 13 Waited count: 472 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@7f56ded0 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 372 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 29 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (Time-limited test-SendThread(127.0.0.1:54632)): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 261 (Time-limited test-EventThread): State: WAITING Blocked count: 19 Waited count: 58 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2bad99a9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 262 (NIOWorkerThread-2): State: WAITING Blocked count: 6 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-3): State: WAITING Blocked count: 2 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-4): State: WAITING Blocked count: 4 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (zk-event-processor-pool-0): State: WAITING Blocked count: 25 Waited count: 73 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7d0a6473 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-5): State: WAITING Blocked count: 4 Waited count: 96 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-6): State: WAITING Blocked count: 6 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-7): State: WAITING Blocked count: 4 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 
(NIOWorkerThread-8): State: WAITING Blocked count: 4 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-9): State: WAITING Blocked count: 10 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-10): State: WAITING Blocked count: 5 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-11): State: WAITING Blocked count: 5 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-12): State: WAITING Blocked count: 2 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-13): State: WAITING Blocked count: 2 Waited count: 94 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-14): State: WAITING Blocked count: 4 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-15): State: WAITING Blocked count: 3 Waited count: 94 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (NIOWorkerThread-16): State: WAITING Blocked count: 5 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43753): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@3463955c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753): State: WAITING Blocked count: 41 Waited count: 294 Waiting on java.util.concurrent.Semaphore$NonfairSync@47f13990 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753): State: WAITING Blocked count: 204 Waited count: 731 Waiting on java.util.concurrent.Semaphore$NonfairSync@568016 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43753): State: WAITING Blocked count: 59 Waited count: 9685 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e514593 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43753): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@630ec70 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43753): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@630ec70 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=43753): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@48c3689f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=43753): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@7ca117a1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native 
Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 287 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=43753): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@58f8a87b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 288 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=43753): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@592a8c78 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 291 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@a7e9052 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 292 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 
Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 312 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 332 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 73 Waited count: 6 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 289 (M:0;ed8014902436:43753): State: TIMED_WAITING Blocked count: 12 Waited count: 3913 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) 
app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1116/0x00007f8044f95250.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 353 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 46 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 355 (master/ed8014902436:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 357 (master/ed8014902436:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 359 (org.apache.hadoop.hdfs.PeerCache@5c1ef90c): State: TIMED_WAITING Blocked count: 0 Waited count: 151 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 377 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 4495 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 394 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 122 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 395 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 127 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 408 (Idle-Rpc-Conn-Sweeper-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 152 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 419 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 45 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 407 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 44892 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 429 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 22 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 430 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 11 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 454 (RegionServerTracker-0): State: WAITING Blocked count: 7 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3b5d2b28 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 478 (regionserver/ed8014902436:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@779c2e64 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 476 (regionserver/ed8014902436:0.procedureResultReporter): State: WAITING Blocked count: 17 Waited count: 31 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@523a2d3d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 477 
(regionserver/ed8014902436:0.procedureResultReporter): State: WAITING Blocked count: 17 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@758e89d4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 503 (LeaseRenewer:jenkins.hfs.2@localhost:37331): State: TIMED_WAITING Blocked count: 13 Waited count: 472 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 505 (LeaseRenewer:jenkins.hfs.1@localhost:37331): State: TIMED_WAITING Blocked count: 13 Waited count: 470 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 508 (LeaseRenewer:jenkins.hfs.0@localhost:37331): State: TIMED_WAITING Blocked count: 12 Waited count: 471 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 523 (region-location-0): State: WAITING Blocked count: 8 Waited count: 13 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2758e826 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 406 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 44733 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 535 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 539 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 546 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 557 (ForkJoinPool.commonPool-worker-3): State: WAITING Blocked count: 0 Waited count: 538 Waiting on java.util.concurrent.ForkJoinPool@31a11da7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 583 (region-location-1): State: WAITING Blocked count: 7 Waited count: 12 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2758e826 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 584 (region-location-2): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2758e826 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 985 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 1005 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1046 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1078 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1087 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 74 Waited count: 120 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@33b072d8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1091 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1092 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1237 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1238 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1239 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1240 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1292 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1293 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1294 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1296 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1297 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1649 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 70 Waiting on java.util.TaskQueue@53c9b550 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) 
java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 2009 (region-location-3): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2758e826 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2010 (region-location-4): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2758e826 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2136 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2144 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2325 (ForkJoinPool.commonPool-worker-4): State: WAITING Blocked count: 0 Waited count: 813 Waiting on java.util.concurrent.ForkJoinPool@31a11da7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 5702 (ForkJoinPool.commonPool-worker-5): State: WAITING Blocked count: 0 Waited count: 138 Waiting on java.util.concurrent.ForkJoinPool@31a11da7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 9056 (ForkJoinPool.commonPool-worker-6): State: WAITING Blocked count: 0 Waited count: 166 Waiting on java.util.concurrent.ForkJoinPool@31a11da7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 9595 (ForkJoinPool.commonPool-worker-7): State: TIMED_WAITING Blocked count: 0 Waited count: 230 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 11458 (AsyncFSWAL-1-hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/MasterData-prefix:ed8014902436,43753,1733268369485): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6a3cd6ab Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11462 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 5 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-12-03T23:34:08,452 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; it might be because your Hadoop version is > 3.2.3 or 3.3.4; see HBASE-27595 for details. 2024-12-03T23:34:38,452 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; it might be because your Hadoop version is > 3.2.3 or 3.3.4; see HBASE-27595 for details. 
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;ed8014902436:43753 230 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 9 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 77 Waited count: 21 Waiting on java.lang.ref.ReferenceQueue$Lock@47709661 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 22 Waited count: 24 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 23 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 3 Waited count: 30 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@41cb614c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 5185 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 52 Waiting on java.util.concurrent.CountDownLatch$Sync@648e0aab Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 13105 Waited count: 13804 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 15 Waited count: 16 Waiting on java.lang.ref.ReferenceQueue$Lock@474feda6 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@5d943502 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@210db1f9): State: TIMED_WAITING Blocked count: 0 Waited count: 1032 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 104 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1788055457-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f804442b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1788055457-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f804442b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1788055457-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f804442b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1788055457-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f804442b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1788055457-41-acceptor-0@2913c9f2-ServerConnector@322d032a{HTTP/1.1, (http/1.1)}{localhost:42843}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1788055457-42): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1788055457-43): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1788055457-44): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-48adee89-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 35 Waited count: 3493 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46246080 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 37331): State: TIMED_WAITING Blocked count: 1 Waited 
count: 53 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 104 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@37f20e91): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 173 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@308b96ef): State: TIMED_WAITING Blocked count: 0 Waited count: 104 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 172 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 50780 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1518 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4703d8aa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 37331): State: TIMED_WAITING Blocked count: 135 Waited count: 2756 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 37331): State: TIMED_WAITING Blocked count: 125 Waited count: 2745 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 37331): State: TIMED_WAITING Blocked count: 116 Waited count: 2740 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 37331): State: TIMED_WAITING Blocked count: 111 Waited count: 2751 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 37331): State: TIMED_WAITING Blocked count: 113 Waited count: 2765 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@2f8bc2): State: TIMED_WAITING Blocked count: 0 Waited count: 258 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@641be552): State: TIMED_WAITING Blocked count: 0 Waited count: 104 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@54c54640): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@401c0ac3): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(1787404574)): State: TIMED_WAITING Blocked count: 0 Waited count: 19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp1646715981-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f804442b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp1646715981-88-acceptor-0@137ae5e2-ServerConnector@33a58c38{HTTP/1.1, (http/1.1)}{localhost:35253}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp1646715981-89): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp1646715981-90): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-61e651cd-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@70908ed9): State: TIMED_WAITING Blocked count: 0 Waited count: 1030 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 44719): State: TIMED_WAITING Blocked count: 1 Waited count: 53 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 103 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 1 Waited count: 330 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7a701ddc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-1874256318-172.17.0.2-1733268364664 heartbeating to localhost/127.0.0.1:37331): State: TIMED_WAITING Blocked count: 1538 Waited count: 1664 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4944581b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) 
app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 44719): State: TIMED_WAITING Blocked count: 0 Waited count: 525 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 44719): State: TIMED_WAITING Blocked count: 0 Waited count: 530 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 44719): State: TIMED_WAITING Blocked count: 0 Waited count: 520 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 44719): State: TIMED_WAITING Blocked count: 0 Waited count: 516 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 44719): State: TIMED_WAITING Blocked count: 0 Waited count: 523 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp965936876-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f804442b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp965936876-122-acceptor-0@4a0d9492-ServerConnector@439d9c33{HTTP/1.1, (http/1.1)}{localhost:44045}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp965936876-123): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 113 (IPC Client (790252428) connection to localhost/127.0.0.1:37331 from jenkins): State: TIMED_WAITING Blocked count: 1474 Waited count: 1475 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 124 (qtp965936876-124): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 114 (IPC Parameter Sending Thread for localhost/127.0.0.1:37331): State: TIMED_WAITING Blocked count: 0 Waited count: 2298 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-40c2a88f-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@24b300e4): State: TIMED_WAITING Blocked count: 3 Waited count: 1029 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 45585): State: TIMED_WAITING Blocked count: 1 Waited count: 53 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 103 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 2 Waited count: 367 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e1bbf3b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-1874256318-172.17.0.2-1733268364664 heartbeating to localhost/127.0.0.1:37331): State: TIMED_WAITING Blocked count: 1513 Waited count: 1666 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@7661a068): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 45585): State: TIMED_WAITING Blocked count: 0 Waited count: 529 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 45585): State: TIMED_WAITING Blocked count: 0 Waited count: 540 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 45585): State: TIMED_WAITING Blocked count: 0 Waited count: 536 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 45585): State: TIMED_WAITING Blocked count: 0 Waited count: 533 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 45585): State: TIMED_WAITING Blocked count: 0 Waited count: 535 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp142999172-153): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f804442b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp142999172-154-acceptor-0@20ffac82-ServerConnector@6d761894{HTTP/1.1, (http/1.1)}{localhost:37007}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp142999172-155): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp142999172-156): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-65ea3f7-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@5f59958a): State: TIMED_WAITING Blocked count: 0 Waited count: 1029 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 40667): State: TIMED_WAITING Blocked count: 1 Waited count: 53 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 103 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 0 Waited count: 363 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5167f793 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-1874256318-172.17.0.2-1733268364664 heartbeating to localhost/127.0.0.1:37331): State: TIMED_WAITING Blocked count: 1475 Waited count: 1656 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@1195476b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 40667): State: TIMED_WAITING Blocked count: 0 Waited count: 555 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 40667): State: TIMED_WAITING Blocked count: 0 Waited count: 565 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 40667): State: TIMED_WAITING Blocked count: 0 Waited count: 547 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 40667): State: TIMED_WAITING Blocked count: 0 Waited count: 561 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 40667): State: TIMED_WAITING Blocked count: 0 Waited count: 515 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data1)): State: TIMED_WAITING Blocked count: 32 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data3)): State: TIMED_WAITING Blocked count: 30 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data2)): State: TIMED_WAITING Blocked count: 29 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 188 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data4)): State: TIMED_WAITING Blocked count: 26 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 198 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data4/current/BP-1874256318-172.17.0.2-1733268364664): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data1/current/BP-1874256318-172.17.0.2-1733268364664): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data3/current/BP-1874256318-172.17.0.2-1733268364664): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 201 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data2/current/BP-1874256318-172.17.0.2-1733268364664): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 213 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 214 (pool-15-thread-1): State: 
TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 217 (java.util.concurrent.ThreadPoolExecutor$Worker@3ee2e622[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 220 (java.util.concurrent.ThreadPoolExecutor$Worker@8f83449[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 221 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data5)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 222 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 227 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data6/current/BP-1874256318-172.17.0.2-1733268364664): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 228 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data5/current/BP-1874256318-172.17.0.2-1733268364664): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 232 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 236 (java.util.concurrent.ThreadPoolExecutor$Worker@4d6f6e47[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 237 (FsDatasetAsyncDiskServiceFixer): State: 
TIMED_WAITING Blocked count: 0 Waited count: 18 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 239 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 240 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 241 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:54632): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 238 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 53 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 242 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 258 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 243 (SyncThread:0): State: WAITING Blocked count: 10 Waited count: 375 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@231963f2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 244 (ProcessThread(sid:0 
cport:54632):): State: WAITING Blocked count: 1 Waited count: 467 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@57a67d3c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 245 (RequestThrottler): State: WAITING Blocked count: 1 Waited count: 504 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5eadd9a4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 246 (NIOWorkerThread-1): State: WAITING Blocked count: 4 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@7f56ded0 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) 
app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 400 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 29 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (Time-limited test-SendThread(127.0.0.1:54632)): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 261 (Time-limited test-EventThread): State: WAITING Blocked count: 19 Waited count: 58 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2bad99a9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 262 (NIOWorkerThread-2): State: WAITING Blocked count: 6 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-3): State: WAITING Blocked count: 2 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-4): State: WAITING Blocked count: 4 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (zk-event-processor-pool-0): State: WAITING Blocked count: 25 Waited count: 73 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7d0a6473 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-5): State: WAITING Blocked count: 4 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-6): State: WAITING Blocked count: 6 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-7): State: WAITING Blocked count: 4 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-8): State: WAITING Blocked count: 4 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-9): State: WAITING Blocked count: 10 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-10): State: WAITING Blocked count: 5 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-11): State: WAITING Blocked count: 5 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-12): State: WAITING Blocked count: 2 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-13): State: WAITING Blocked count: 2 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-14): State: WAITING Blocked count: 4 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-15): State: WAITING Blocked count: 3 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (NIOWorkerThread-16): State: WAITING Blocked count: 5 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43753): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@3463955c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753): State: WAITING Blocked count: 41 Waited count: 294 Waiting on java.util.concurrent.Semaphore$NonfairSync@47f13990 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 
(RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753): State: WAITING Blocked count: 204 Waited count: 731 Waiting on java.util.concurrent.Semaphore$NonfairSync@568016 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43753): State: WAITING Blocked count: 59 Waited count: 9685 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e514593 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43753): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@630ec70 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43753): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@630ec70 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=43753): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@48c3689f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=43753): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@7ca117a1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 287 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=43753): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@58f8a87b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 288 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=43753): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@592a8c78 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) 
java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 291 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@a7e9052 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 292 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 312 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 332 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 73 
Waited count: 6 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 289 (M:0;ed8014902436:43753): State: TIMED_WAITING Blocked count: 12 Waited count: 3913 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1116/0x00007f8044f95250.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 353 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 52 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 355 (master/ed8014902436:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 357 (master/ed8014902436:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 359 (org.apache.hadoop.hdfs.PeerCache@5c1ef90c): State: TIMED_WAITING Blocked count: 0 Waited count: 171 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 377 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 5094 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 394 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 122 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 395 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 127 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 408 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 169 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@10f6beb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 419 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 51 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 407 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 50894 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) 
app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 429 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 22 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 430 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 11 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 454 (RegionServerTracker-0): State: WAITING Blocked count: 7 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3b5d2b28 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 478 (regionserver/ed8014902436:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 29 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@779c2e64 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 476 (regionserver/ed8014902436:0.procedureResultReporter): State: WAITING Blocked count: 17 Waited count: 31 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@523a2d3d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 477 (regionserver/ed8014902436:0.procedureResultReporter): State: WAITING Blocked count: 17 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@758e89d4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 523 (region-location-0): State: WAITING Blocked count: 8 Waited count: 13 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2758e826 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 406 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 50735 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 535 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 539 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 546 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 557 (ForkJoinPool.commonPool-worker-3): State: WAITING Blocked count: 0 Waited count: 538 Waiting on java.util.concurrent.ForkJoinPool@31a11da7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 583 (region-location-1): State: WAITING Blocked count: 7 Waited count: 12 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2758e826 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 584 (region-location-2): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2758e826 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 985 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 1011 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1046 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1078 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1087 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 74 Waited count: 120 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@33b072d8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1091 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1092 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1237 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1238 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1239 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1240 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1292 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1293 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1294 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1296 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1297 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1649 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 70 Waiting on java.util.TaskQueue@53c9b550 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) 
java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 2009 (region-location-3): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2758e826 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2010 (region-location-4): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2758e826 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2136 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2144 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2325 (ForkJoinPool.commonPool-worker-4): State: TIMED_WAITING Blocked count: 0 Waited count: 814 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 5702 (ForkJoinPool.commonPool-worker-5): State: WAITING Blocked count: 0 Waited count: 138 Waiting on java.util.concurrent.ForkJoinPool@31a11da7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 9056 (ForkJoinPool.commonPool-worker-6): State: WAITING Blocked count: 0 Waited count: 166 Waiting on java.util.concurrent.ForkJoinPool@31a11da7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 11458 (AsyncFSWAL-1-hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/MasterData-prefix:ed8014902436,43753,1733268369485): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6a3cd6ab Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11462 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-12-03T23:35:08,452 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might be because your Hadoop version > 3.2.3 or 3.3.4; see HBASE-27595 for details. 2024-12-03T23:35:38,453 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might be because your Hadoop version > 3.2.3 or 3.3.4; see HBASE-27595 for details. 
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;ed8014902436:43753 230 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 9 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 77 Waited count: 21 Waiting on java.lang.ref.ReferenceQueue$Lock@47709661 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 22 Waited count: 25 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 26 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 3 Waited count: 33 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@41cb614c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 5784 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 58 Waiting on java.util.concurrent.CountDownLatch$Sync@7af3405f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 13105 Waited count: 13805 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 15 Waited count: 16 Waiting on java.lang.ref.ReferenceQueue$Lock@474feda6 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@5d943502 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@210db1f9): State: TIMED_WAITING Blocked count: 0 Waited count: 1152 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 116 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1788055457-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f804442b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1788055457-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f804442b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1788055457-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f804442b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1788055457-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f804442b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1788055457-41-acceptor-0@2913c9f2-ServerConnector@322d032a{HTTP/1.1, (http/1.1)}{localhost:42843}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1788055457-42): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1788055457-43): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1788055457-44): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-48adee89-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 35 Waited count: 3493 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46246080 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 37331): State: TIMED_WAITING Blocked count: 1 Waited 
count: 59 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 116 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@37f20e91): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 193 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@308b96ef): State: TIMED_WAITING Blocked count: 0 Waited count: 116 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 192 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 56707 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1518 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4703d8aa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 37331): State: TIMED_WAITING Blocked count: 135 Waited count: 2816 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 37331): State: TIMED_WAITING Blocked count: 125 Waited count: 2805 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 37331): State: TIMED_WAITING Blocked count: 116 Waited count: 2800 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 37331): State: TIMED_WAITING Blocked count: 111 Waited count: 2811 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 37331): State: TIMED_WAITING Blocked count: 113 Waited count: 2826 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@2f8bc2): State: TIMED_WAITING Blocked count: 0 Waited count: 288 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@641be552): State: TIMED_WAITING Blocked count: 0 Waited count: 116 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@54c54640): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@401c0ac3): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(1787404574)): State: TIMED_WAITING Blocked count: 0 Waited count: 21 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp1646715981-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f804442b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp1646715981-88-acceptor-0@137ae5e2-ServerConnector@33a58c38{HTTP/1.1, (http/1.1)}{localhost:35253}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp1646715981-89): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp1646715981-90): State: TIMED_WAITING Blocked count: 0 Waited count: 14 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-61e651cd-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@70908ed9): State: TIMED_WAITING Blocked count: 0 Waited count: 1150 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 44719): State: TIMED_WAITING Blocked count: 1 Waited count: 59 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 115 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 1 Waited count: 350 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7a701ddc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-1874256318-172.17.0.2-1733268364664 heartbeating to localhost/127.0.0.1:37331): State: TIMED_WAITING Blocked count: 1558 Waited count: 1704 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4944581b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) 
app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 44719): State: TIMED_WAITING Blocked count: 0 Waited count: 585 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 44719): State: TIMED_WAITING Blocked count: 0 Waited count: 590 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 44719): State: TIMED_WAITING Blocked count: 0 Waited count: 580 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 44719): State: TIMED_WAITING Blocked count: 0 Waited count: 576 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 44719): State: TIMED_WAITING Blocked count: 0 Waited count: 583 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp965936876-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f804442b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp965936876-122-acceptor-0@4a0d9492-ServerConnector@439d9c33{HTTP/1.1, (http/1.1)}{localhost:44045}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp965936876-123): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 113 (IPC Client (790252428) connection to localhost/127.0.0.1:37331 from jenkins): State: TIMED_WAITING Blocked count: 1533 Waited count: 1534 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 124 (qtp965936876-124): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 114 (IPC Parameter Sending Thread for localhost/127.0.0.1:37331): State: TIMED_WAITING Blocked count: 0 Waited count: 2358 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-40c2a88f-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@24b300e4): State: TIMED_WAITING Blocked count: 3 Waited count: 1149 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 45585): State: TIMED_WAITING Blocked count: 1 Waited count: 59 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 115 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 2 Waited count: 387 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e1bbf3b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-1874256318-172.17.0.2-1733268364664 heartbeating to localhost/127.0.0.1:37331): State: TIMED_WAITING Blocked count: 1533 Waited count: 1706 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@7661a068): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 45585): State: TIMED_WAITING Blocked count: 0 Waited count: 589 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 45585): State: TIMED_WAITING Blocked count: 0 Waited count: 600 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 45585): State: TIMED_WAITING Blocked count: 0 Waited count: 596 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 45585): State: TIMED_WAITING Blocked count: 0 Waited count: 593 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 45585): State: TIMED_WAITING Blocked count: 0 Waited count: 595 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp142999172-153): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f804442b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp142999172-154-acceptor-0@20ffac82-ServerConnector@6d761894{HTTP/1.1, (http/1.1)}{localhost:37007}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp142999172-155): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp142999172-156): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-65ea3f7-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@5f59958a): State: TIMED_WAITING Blocked count: 0 Waited count: 1149 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 40667): State: TIMED_WAITING Blocked count: 1 Waited count: 59 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 115 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 0 Waited count: 383 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5167f793 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-1874256318-172.17.0.2-1733268364664 heartbeating to localhost/127.0.0.1:37331): State: TIMED_WAITING Blocked count: 1495 Waited count: 1696 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@1195476b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 40667): State: TIMED_WAITING Blocked count: 0 Waited count: 615 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 40667): State: TIMED_WAITING Blocked count: 0 Waited count: 639 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 40667): State: TIMED_WAITING Blocked count: 0 Waited count: 607 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 40667): State: TIMED_WAITING Blocked count: 0 Waited count: 630 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 40667): State: TIMED_WAITING Blocked count: 0 Waited count: 575 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data1)): State: TIMED_WAITING Blocked count: 32 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data3)): State: TIMED_WAITING Blocked count: 30 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data2)): State: TIMED_WAITING Blocked count: 29 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 188 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data4)): State: TIMED_WAITING Blocked count: 26 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 198 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data4/current/BP-1874256318-172.17.0.2-1733268364664): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data1/current/BP-1874256318-172.17.0.2-1733268364664): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data3/current/BP-1874256318-172.17.0.2-1733268364664): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 201 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data2/current/BP-1874256318-172.17.0.2-1733268364664): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 213 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 214 (pool-15-thread-1): State: 
TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 217 (java.util.concurrent.ThreadPoolExecutor$Worker@3ee2e622[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 220 (java.util.concurrent.ThreadPoolExecutor$Worker@8f83449[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 221 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data5)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 222 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 227 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data6/current/BP-1874256318-172.17.0.2-1733268364664): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 228 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data5/current/BP-1874256318-172.17.0.2-1733268364664): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 232 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 236 (java.util.concurrent.ThreadPoolExecutor$Worker@4d6f6e47[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 237 (FsDatasetAsyncDiskServiceFixer): State: 
TIMED_WAITING Blocked count: 0 Waited count: 20 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 239 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 240 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 241 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:54632): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 238 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 59 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 242 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 288 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 243 (SyncThread:0): State: WAITING Blocked count: 10 Waited count: 379 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@231963f2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 244 (ProcessThread(sid:0 
cport:54632):): State: WAITING Blocked count: 1 Waited count: 471 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@57a67d3c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 245 (RequestThrottler): State: WAITING Blocked count: 1 Waited count: 508 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5eadd9a4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 246 (NIOWorkerThread-1): State: WAITING Blocked count: 4 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@7f56ded0 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) 
app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 428 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 29 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (Time-limited test-SendThread(127.0.0.1:54632)): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 261 (Time-limited test-EventThread): State: WAITING Blocked count: 19 Waited count: 58 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2bad99a9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 262 (NIOWorkerThread-2): State: WAITING Blocked count: 6 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-3): State: WAITING Blocked count: 2 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-4): State: WAITING Blocked count: 4 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (zk-event-processor-pool-0): State: WAITING Blocked count: 25 Waited count: 73 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7d0a6473 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-5): State: WAITING Blocked count: 4 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-6): State: WAITING Blocked count: 6 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-7): State: WAITING Blocked count: 4 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-8): State: WAITING Blocked count: 4 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-9): State: WAITING Blocked count: 10 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-10): State: WAITING Blocked count: 5 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-11): State: WAITING Blocked count: 5 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-12): State: WAITING Blocked count: 2 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-13): State: WAITING Blocked count: 2 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-14): State: WAITING Blocked count: 4 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-15): State: WAITING Blocked count: 3 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (NIOWorkerThread-16): State: WAITING Blocked count: 5 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43753): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@3463955c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753): State: WAITING Blocked count: 41 Waited count: 294 Waiting on java.util.concurrent.Semaphore$NonfairSync@47f13990 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 
(RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753): State: WAITING Blocked count: 204 Waited count: 731 Waiting on java.util.concurrent.Semaphore$NonfairSync@568016 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43753): State: WAITING Blocked count: 59 Waited count: 9685 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e514593 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43753): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@630ec70 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43753): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@630ec70 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=43753): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@48c3689f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=43753): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@7ca117a1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 287 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=43753): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@58f8a87b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 288 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=43753): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@592a8c78 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) 
java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 291 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@a7e9052 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 292 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 312 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 332 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 73 
Waited count: 6 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 289 (M:0;ed8014902436:43753): State: TIMED_WAITING Blocked count: 12 Waited count: 3913 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1116/0x00007f8044f95250.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 353 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 58 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 355 (master/ed8014902436:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 357 (master/ed8014902436:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 359 (org.apache.hadoop.hdfs.PeerCache@5c1ef90c): State: TIMED_WAITING Blocked count: 0 Waited count: 191 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 377 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 5693 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 394 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 122 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 395 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 127 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 408 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 169 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@10f6beb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 419 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 57 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 407 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 56897 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) 
app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 429 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 22 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 430 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 11 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 454 (RegionServerTracker-0): State: WAITING Blocked count: 7 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3b5d2b28 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 478 (regionserver/ed8014902436:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 29 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@779c2e64 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 476 (regionserver/ed8014902436:0.procedureResultReporter): State: WAITING Blocked count: 17 Waited count: 31 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@523a2d3d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 477 (regionserver/ed8014902436:0.procedureResultReporter): State: WAITING Blocked count: 17 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@758e89d4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 523 (region-location-0): State: WAITING Blocked count: 8 Waited count: 13 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2758e826 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 406 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 56738 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 535 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 539 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 546 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 557 (ForkJoinPool.commonPool-worker-3): State: TIMED_WAITING Blocked count: 0 Waited count: 539 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 583 (region-location-1): State: WAITING Blocked count: 7 Waited count: 12 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2758e826 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 584 (region-location-2): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2758e826 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 985 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 1017 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1046 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1078 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1087 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 74 Waited count: 120 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@33b072d8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1091 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1092 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1237 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1238 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1239 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1240 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1292 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1293 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1294 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1296 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1297 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1649 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 70 Waiting on java.util.TaskQueue@53c9b550 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) 
java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 2009 (region-location-3): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2758e826 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2010 (region-location-4): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2758e826 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2136 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2144 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5702 (ForkJoinPool.commonPool-worker-5): State: WAITING Blocked count: 0 Waited count: 138 Waiting on java.util.concurrent.ForkJoinPool@31a11da7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 9056 (ForkJoinPool.commonPool-worker-6): State: WAITING Blocked count: 0 Waited count: 166 Waiting on java.util.concurrent.ForkJoinPool@31a11da7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 11458 (AsyncFSWAL-1-hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/MasterData-prefix:ed8014902436,43753,1733268369485): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6a3cd6ab Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11462 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 17 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 11463 (process reaper): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 2024-12-03T23:36:08,453 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-03T23:36:10,766 DEBUG [master/ed8014902436:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=7, created chunk count=10, reused chunk count=25, reuseRatio=71.43% 2024-12-03T23:36:10,766 DEBUG [master/ed8014902436:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-12-03T23:36:18,691 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-03T23:36:38,454 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;ed8014902436:43753 229 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 9 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 77 Waited count: 21 Waiting on java.lang.ref.ReferenceQueue$Lock@47709661 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 22 Waited count: 26 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 29 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 3 Waited count: 36 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@41cb614c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 6384 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 64 Waiting on java.util.concurrent.CountDownLatch$Sync@665c782c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 13105 Waited count: 13806 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 15 Waited count: 16 Waiting on java.lang.ref.ReferenceQueue$Lock@474feda6 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@5d943502 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@210db1f9): State: TIMED_WAITING Blocked count: 0 Waited count: 1272 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 128 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1788055457-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f804442b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1788055457-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f804442b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1788055457-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f804442b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1788055457-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f804442b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1788055457-41-acceptor-0@2913c9f2-ServerConnector@322d032a{HTTP/1.1, (http/1.1)}{localhost:42843}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1788055457-42): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1788055457-43): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1788055457-44): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-48adee89-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 35 Waited count: 3493 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46246080 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 37331): State: TIMED_WAITING Blocked count: 1 Waited 
count: 65 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 128 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@37f20e91): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 213 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@308b96ef): State: TIMED_WAITING Blocked count: 0 Waited count: 128 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 212 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 62635 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1518 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4703d8aa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 37331): State: TIMED_WAITING Blocked count: 135 Waited count: 2876 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 37331): State: TIMED_WAITING Blocked count: 125 Waited count: 2865 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 37331): State: TIMED_WAITING Blocked count: 116 Waited count: 2860 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 37331): State: TIMED_WAITING Blocked count: 111 Waited count: 2871 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 37331): State: TIMED_WAITING Blocked count: 113 Waited count: 2886 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@2f8bc2): State: TIMED_WAITING Blocked count: 0 Waited count: 318 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@641be552): State: TIMED_WAITING Blocked count: 0 Waited count: 128 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@54c54640): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@401c0ac3): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(1787404574)): State: TIMED_WAITING Blocked count: 0 Waited count: 23 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp1646715981-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f804442b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp1646715981-88-acceptor-0@137ae5e2-ServerConnector@33a58c38{HTTP/1.1, (http/1.1)}{localhost:35253}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp1646715981-89): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp1646715981-90): State: TIMED_WAITING Blocked count: 0 Waited count: 16 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-61e651cd-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@70908ed9): State: TIMED_WAITING Blocked count: 0 Waited count: 1270 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 44719): State: TIMED_WAITING Blocked count: 1 Waited count: 65 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 127 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 1 Waited count: 370 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7a701ddc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-1874256318-172.17.0.2-1733268364664 heartbeating to localhost/127.0.0.1:37331): State: TIMED_WAITING Blocked count: 1578 Waited count: 1744 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4944581b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) 
app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 44719): State: TIMED_WAITING Blocked count: 0 Waited count: 645 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 44719): State: TIMED_WAITING Blocked count: 0 Waited count: 650 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 44719): State: TIMED_WAITING Blocked count: 0 Waited count: 640 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 44719): State: TIMED_WAITING Blocked count: 0 Waited count: 636 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 44719): State: TIMED_WAITING Blocked count: 0 Waited count: 643 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp965936876-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f804442b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp965936876-122-acceptor-0@4a0d9492-ServerConnector@439d9c33{HTTP/1.1, (http/1.1)}{localhost:44045}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp965936876-123): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 113 (IPC Client (790252428) connection to localhost/127.0.0.1:37331 from jenkins): State: TIMED_WAITING Blocked count: 1593 Waited count: 1594 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 124 (qtp965936876-124): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 114 (IPC Parameter Sending Thread for localhost/127.0.0.1:37331): State: TIMED_WAITING Blocked count: 0 Waited count: 2418 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-40c2a88f-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@24b300e4): State: TIMED_WAITING Blocked count: 3 Waited count: 1269 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 45585): State: TIMED_WAITING Blocked count: 1 Waited count: 65 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 127 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 2 Waited count: 407 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e1bbf3b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-1874256318-172.17.0.2-1733268364664 heartbeating to localhost/127.0.0.1:37331): State: TIMED_WAITING Blocked count: 1553 Waited count: 1746 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@7661a068): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 45585): State: TIMED_WAITING Blocked count: 0 Waited count: 649 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 45585): State: TIMED_WAITING Blocked count: 0 Waited count: 660 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 45585): State: TIMED_WAITING Blocked count: 0 Waited count: 656 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 45585): State: TIMED_WAITING Blocked count: 0 Waited count: 653 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 45585): State: TIMED_WAITING Blocked count: 0 Waited count: 655 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp142999172-153): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f804442b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp142999172-154-acceptor-0@20ffac82-ServerConnector@6d761894{HTTP/1.1, (http/1.1)}{localhost:37007}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp142999172-155): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp142999172-156): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-65ea3f7-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@5f59958a): State: TIMED_WAITING Blocked count: 0 Waited count: 1269 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 40667): State: TIMED_WAITING Blocked count: 1 Waited count: 65 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 127 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 0 Waited count: 403 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5167f793 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-1874256318-172.17.0.2-1733268364664 heartbeating to localhost/127.0.0.1:37331): State: TIMED_WAITING Blocked count: 1515 Waited count: 1736 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@1195476b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 40667): State: TIMED_WAITING Blocked count: 0 Waited count: 689 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 40667): State: TIMED_WAITING Blocked count: 0 Waited count: 699 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 40667): State: TIMED_WAITING Blocked count: 0 Waited count: 667 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 40667): State: TIMED_WAITING Blocked count: 0 Waited count: 696 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 40667): State: TIMED_WAITING Blocked count: 0 Waited count: 635 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data1)): State: TIMED_WAITING Blocked count: 32 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data3)): State: TIMED_WAITING Blocked count: 30 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data2)): State: TIMED_WAITING Blocked count: 29 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 188 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data4)): State: TIMED_WAITING Blocked count: 26 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 198 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data4/current/BP-1874256318-172.17.0.2-1733268364664): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data1/current/BP-1874256318-172.17.0.2-1733268364664): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data3/current/BP-1874256318-172.17.0.2-1733268364664): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 201 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data2/current/BP-1874256318-172.17.0.2-1733268364664): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 213 (pool-23-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@280ebeea Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 214 (pool-15-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7d94b9fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 217 (java.util.concurrent.ThreadPoolExecutor$Worker@3ee2e622[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 220 (java.util.concurrent.ThreadPoolExecutor$Worker@8f83449[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 221 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data5)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 222 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 227 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data6/current/BP-1874256318-172.17.0.2-1733268364664): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 228 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data5/current/BP-1874256318-172.17.0.2-1733268364664): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 232 (pool-33-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@8e02c40 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 236 (java.util.concurrent.ThreadPoolExecutor$Worker@4d6f6e47[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 237 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 22 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 239 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 240 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 241 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:54632): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 238 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 65 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 242 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 318 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 243 (SyncThread:0): State: WAITING Blocked count: 10 Waited count: 383 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@231963f2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 244 (ProcessThread(sid:0 cport:54632):): State: WAITING Blocked count: 1 Waited count: 475 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@57a67d3c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 245 (RequestThrottler): State: WAITING Blocked count: 1 Waited count: 512 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5eadd9a4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 246 (NIOWorkerThread-1): State: WAITING Blocked count: 4 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@7f56ded0 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 459 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 29 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (Time-limited test-SendThread(127.0.0.1:54632)): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native 
Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 261 (Time-limited test-EventThread): State: WAITING Blocked count: 19 Waited count: 58 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2bad99a9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 262 (NIOWorkerThread-2): State: WAITING Blocked count: 6 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-3): State: WAITING Blocked count: 2 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-4): State: WAITING Blocked count: 4 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (zk-event-processor-pool-0): State: WAITING Blocked count: 25 Waited count: 73 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7d0a6473 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-5): State: WAITING Blocked count: 4 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-6): State: WAITING Blocked count: 6 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-7): State: WAITING Blocked count: 4 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-8): State: WAITING Blocked count: 4 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-9): State: WAITING Blocked count: 10 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-10): State: WAITING Blocked count: 5 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-11): State: WAITING Blocked count: 5 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-12): State: WAITING Blocked count: 2 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-13): State: WAITING Blocked count: 2 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-14): State: WAITING Blocked count: 4 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-15): State: WAITING Blocked count: 3 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (NIOWorkerThread-16): State: WAITING Blocked count: 5 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43753): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@3463955c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753): State: WAITING Blocked count: 41 Waited count: 294 Waiting on java.util.concurrent.Semaphore$NonfairSync@47f13990 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753): State: WAITING Blocked count: 204 Waited count: 731 Waiting on java.util.concurrent.Semaphore$NonfairSync@568016 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43753): State: WAITING Blocked count: 59 Waited count: 9685 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e514593 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43753): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@630ec70 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43753): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@630ec70 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=43753): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@48c3689f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=43753): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@7ca117a1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 287 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=43753): State: WAITING Blocked count: 0 Waited count: 1 Waiting on 
java.util.concurrent.Semaphore$NonfairSync@58f8a87b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 288 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=43753): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@592a8c78 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 291 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@a7e9052 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 292 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 312 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 332 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 73 Waited count: 6 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 289 (M:0;ed8014902436:43753): State: TIMED_WAITING Blocked count: 12 Waited count: 3913 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1116/0x00007f8044f95250.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) 
app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 353 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 64 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 355 (master/ed8014902436:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 357 (master/ed8014902436:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 359 (org.apache.hadoop.hdfs.PeerCache@5c1ef90c): State: TIMED_WAITING Blocked count: 0 Waited count: 211 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 377 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 
Waited count: 6293 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 394 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 122 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 395 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 127 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 408 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 169 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@10f6beb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 419 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 63 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 407 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 62899 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 429 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 22 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 430 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 11 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 454 (RegionServerTracker-0): State: WAITING Blocked count: 7 Waited count: 9 
Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3b5d2b28 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 478 (regionserver/ed8014902436:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@779c2e64 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 476 (regionserver/ed8014902436:0.procedureResultReporter): State: WAITING Blocked count: 17 Waited count: 31 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@523a2d3d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 477 (regionserver/ed8014902436:0.procedureResultReporter): State: WAITING Blocked count: 17 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@758e89d4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 523 (region-location-0): State: WAITING Blocked count: 8 Waited count: 13 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2758e826 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 406 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 62740 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 535 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 539 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 546 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 583 (region-location-1): State: WAITING Blocked count: 7 Waited count: 12 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2758e826 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 584 (region-location-2): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2758e826 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 985 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 1023 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1046 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1078 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1087 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 74 Waited count: 120 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@33b072d8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1091 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1092 
(MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1237 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1238 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1239 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1240 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1292 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1293 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1294 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1296 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1297 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1649 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 70 Waiting on java.util.TaskQueue@53c9b550 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 2009 (region-location-3): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2758e826 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2010 (region-location-4): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2758e826 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2136 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2144 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5702 (ForkJoinPool.commonPool-worker-5): State: WAITING Blocked count: 0 Waited count: 138 Waiting on java.util.concurrent.ForkJoinPool@31a11da7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 9056 (ForkJoinPool.commonPool-worker-6): State: TIMED_WAITING Blocked count: 0 Waited count: 167 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 11458 
(AsyncFSWAL-1-hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/MasterData-prefix:ed8014902436,43753,1733268369485): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6a3cd6ab Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11463 (process reaper): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11468 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-12-03T23:37:08,454 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-03T23:37:38,454 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-03T23:37:41,256 DEBUG [M:0;ed8014902436:43753 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733268761242Disabling compacts and flushes for region at 1733268761242Disabling writes for close at 1733268761253 (+11 ms)Obtaining lock to block concurrent updates at 1733268761253Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733268761253Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=1020313, getHeapSize=1222824, getOffHeapSize=0, getCellsCount=2672 at 1733268761254 (+1 ms)Failed flush master:store,,1.1595e783b53d99cd5eef43b6debb2682., putting online again at 1733269061256 (+300002 ms)
2024-12-03T23:37:41,256 WARN [M:0;ed8014902436:43753 {}] region.MasterRegion(134): Failed to close region
org.apache.hadoop.hbase.regionserver.wal.WALSyncTimeoutIOException: org.apache.hadoop.hbase.exceptions.TimeoutIOException: Failed to get sync result after 300000 ms for txid=4594, WAL system stuck?
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1033) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) ~[classes/:?]
    at org.apache.hadoop.hbase.master.region.MasterRegion.closeRegion(MasterRegion.java:132) ~[classes/:?]
    at org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:205) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1819) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:631) ~[classes/:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: org.apache.hadoop.hbase.exceptions.TimeoutIOException: Failed to get sync result after 300000 ms for txid=4594, WAL system stuck?
    at org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:171) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) ~[classes/:?]
    ... 19 more
2024-12-03T23:37:41,260 WARN [Close-WAL-Writer-0 {}] wal.AsyncProtobufLogWriter(165): normal close failed, try recover
java.lang.NullPointerException: Cannot invoke "org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf.ensureWritable(int)" because "this.buf" is null
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.writeInt(FanOutOneBlockAsyncDFSOutput.java:391) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.lambda$writeWALTrailerAndMagic$3(AsyncProtobufLogWriter.java:247) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.writeWALMetadata(AsyncProtobufLogWriter.java:203) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.writeWALTrailerAndMagic(AsyncProtobufLogWriter.java:240) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractProtobufLogWriter.writeWALTrailer(AbstractProtobufLogWriter.java:252) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:162) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-03T23:37:41,263 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils
2024-12-03T23:37:41,263 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease()
2024-12-03T23:37:41,263 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file /user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/MasterData/WALs/ed8014902436,43753,1733268369485/ed8014902436%2C43753%2C1733268369485.1733268371251
2024-12-03T23:37:41,265 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=0 on file=/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/MasterData/WALs/ed8014902436,43753,1733268369485/ed8014902436%2C43753%2C1733268369485.1733268371251 after 0ms
java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.recoverAndClose(FanOutOneBlockAsyncDFSOutput.java:605) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:166) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-03T23:37:41,265 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed.
java.io.InterruptedIOException: Operation cancelled
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.checkIfCancelled(RecoverLeaseFSUtils.java:269) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:159) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.recoverAndClose(FanOutOneBlockAsyncDFSOutput.java:605) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:166) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-03T23:37:41,265 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/MasterData/WALs/ed8014902436,43753,1733268369485/ed8014902436%2C43753%2C1733268369485.1733268371251
2024-12-03T23:37:41,265 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=0 on file=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/MasterData/WALs/ed8014902436,43753,1733268369485/ed8014902436%2C43753%2C1733268369485.1733268371251 after 0ms
java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;ed8014902436:43753
230 active threads
Thread 1 (main):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 4
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444)
    java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203)
    app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167)
    app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128)
    app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39)
    app//org.junit.rules.RunRules.evaluate(RunRules.java:20)
    app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    app//org.junit.runners.ParentRunner.run(ParentRunner.java:413)
    app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316)
    app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240)
    app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214)
    app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155)
    app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385)
    app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162)
    app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507)
    app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495)
Thread 2 (Reference Handler):
  State: RUNNABLE
  Blocked count: 9
  Waited count: 0
  Stack:
    java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method)
    java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253)
    java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215)
Thread 3 (Finalizer):
  State: WAITING
Blocked count: 77 Waited count: 21 Waiting on java.lang.ref.ReferenceQueue$Lock@47709661 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 22 Waited count: 27 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 32 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 3 Waited count: 39 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@41cb614c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 6983 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 70 Waiting on java.util.concurrent.CountDownLatch$Sync@1282aabb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 13105 Waited count: 13807 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) 
java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 15 Waited count: 16 Waiting on java.lang.ref.ReferenceQueue$Lock@474feda6 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@5d943502 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@210db1f9): State: TIMED_WAITING Blocked count: 0 Waited count: 1392 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 140 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1788055457-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f804442b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1788055457-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f804442b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1788055457-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f804442b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1788055457-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f804442b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1788055457-41-acceptor-0@2913c9f2-ServerConnector@322d032a{HTTP/1.1, (http/1.1)}{localhost:42843}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1788055457-42): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 43 (qtp1788055457-43): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1788055457-44): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-48adee89-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 35 Waited count: 3493 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46246080 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 37331): State: TIMED_WAITING Blocked count: 1 Waited count: 71 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 140 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@37f20e91): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 233 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@308b96ef): State: TIMED_WAITING Blocked count: 0 Waited count: 140 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 232 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 68564 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1518 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4703d8aa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC 
Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 37331): State: TIMED_WAITING Blocked count: 146 Waited count: 2936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 37331): State: TIMED_WAITING Blocked count: 125 Waited count: 2925 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 37331): State: TIMED_WAITING Blocked count: 119 Waited count: 2920 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 37331): State: TIMED_WAITING Blocked count: 111 Waited count: 2931 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 37331): State: TIMED_WAITING Blocked count: 113 Waited count: 2946 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@2f8bc2): State: TIMED_WAITING Blocked count: 0 Waited count: 348 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@641be552): State: TIMED_WAITING Blocked count: 0 Waited count: 140 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@54c54640): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@401c0ac3): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(1787404574)): State: TIMED_WAITING Blocked count: 0 Waited count: 25 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp1646715981-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f804442b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp1646715981-88-acceptor-0@137ae5e2-ServerConnector@33a58c38{HTTP/1.1, (http/1.1)}{localhost:35253}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp1646715981-89): State: TIMED_WAITING Blocked count: 0 Waited count: 15 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 
90 (qtp1646715981-90): State: TIMED_WAITING Blocked count: 0 Waited count: 17 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-61e651cd-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@70908ed9): State: TIMED_WAITING Blocked count: 0 Waited count: 1390 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection 
scanner for port 44719): State: TIMED_WAITING Blocked count: 1 Waited count: 71 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 139 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 1 Waited count: 390 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7a701ddc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-1874256318-172.17.0.2-1733268364664 heartbeating to localhost/127.0.0.1:37331): State: TIMED_WAITING Blocked count: 1599 Waited count: 1792 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4944581b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 44719): State: TIMED_WAITING Blocked count: 0 Waited count: 705 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 44719): State: TIMED_WAITING Blocked count: 0 Waited count: 710 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 44719): State: TIMED_WAITING Blocked count: 0 Waited count: 700 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 44719): State: TIMED_WAITING Blocked count: 0 Waited count: 696 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 44719): State: TIMED_WAITING Blocked count: 0 Waited count: 703 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp965936876-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f804442b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp965936876-122-acceptor-0@4a0d9492-ServerConnector@439d9c33{HTTP/1.1, (http/1.1)}{localhost:44045}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp965936876-123): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 113 (IPC Client (790252428) connection to localhost/127.0.0.1:37331 from jenkins): State: RUNNABLE Blocked count: 1636 Waited count: 1636 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.FilterInputStream.read(FilterInputStream.java:132) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.FilterInputStream.read(FilterInputStream.java:82) java.base@17.0.11/java.io.FilterInputStream.read(FilterInputStream.java:82) app//org.apache.hadoop.ipc.Client$Connection$PingInputStream.read(Client.java:518) 
java.base@17.0.11/java.io.DataInputStream.readInt(DataInputStream.java:381) app//org.apache.hadoop.ipc.Client$IpcStreams.readResponse(Client.java:1923) app//org.apache.hadoop.ipc.Client$Connection.receiveRpcResponse(Client.java:1203) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1094) Thread 124 (qtp965936876-124): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 114 (IPC Parameter Sending Thread for localhost/127.0.0.1:37331): State: TIMED_WAITING Blocked count: 0 Waited count: 2467 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-40c2a88f-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@24b300e4): State: TIMED_WAITING Blocked count: 3 Waited count: 1389 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 45585): State: TIMED_WAITING Blocked count: 1 Waited count: 71 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 139 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 2 Waited count: 427 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e1bbf3b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-1874256318-172.17.0.2-1733268364664 heartbeating to localhost/127.0.0.1:37331): State: TIMED_WAITING Blocked count: 1575 Waited count: 1790 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@7661a068): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 45585): State: TIMED_WAITING Blocked count: 0 Waited count: 709 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 45585): State: TIMED_WAITING Blocked count: 0 Waited count: 720 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 45585): State: TIMED_WAITING Blocked count: 0 Waited count: 716 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 45585): State: TIMED_WAITING Blocked count: 0 Waited count: 713 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 45585): State: TIMED_WAITING Blocked count: 0 Waited count: 715 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp142999172-153): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f804442b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp142999172-154-acceptor-0@20ffac82-ServerConnector@6d761894{HTTP/1.1, (http/1.1)}{localhost:37007}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp142999172-155): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp142999172-156): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-65ea3f7-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@5f59958a): State: TIMED_WAITING Blocked count: 0 Waited count: 1389 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 40667): State: TIMED_WAITING Blocked count: 1 Waited count: 71 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 139 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 0 Waited count: 424 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5167f793 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-1874256318-172.17.0.2-1733268364664 heartbeating to localhost/127.0.0.1:37331): State: TIMED_WAITING Blocked count: 1536 Waited count: 1778 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@1195476b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 40667): State: TIMED_WAITING Blocked count: 0 Waited count: 749 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 40667): State: TIMED_WAITING Blocked count: 0 Waited count: 759 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 40667): State: TIMED_WAITING Blocked count: 0 Waited count: 727 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 40667): State: TIMED_WAITING Blocked count: 0 Waited count: 756 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 40667): State: TIMED_WAITING Blocked count: 0 Waited count: 695 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data1)): State: TIMED_WAITING Blocked count: 32 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data3)): State: TIMED_WAITING Blocked count: 30 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data2)): State: TIMED_WAITING Blocked count: 29 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 188 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data4)): State: TIMED_WAITING Blocked count: 26 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 198 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data4/current/BP-1874256318-172.17.0.2-1733268364664): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data1/current/BP-1874256318-172.17.0.2-1733268364664): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data3/current/BP-1874256318-172.17.0.2-1733268364664): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 201 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data2/current/BP-1874256318-172.17.0.2-1733268364664): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 213 (pool-23-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@280ebeea Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 214 (pool-15-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7d94b9fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 217 (java.util.concurrent.ThreadPoolExecutor$Worker@3ee2e622[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 220 (java.util.concurrent.ThreadPoolExecutor$Worker@8f83449[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 221 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data5)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 222 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 227 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data6/current/BP-1874256318-172.17.0.2-1733268364664): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 228 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data5/current/BP-1874256318-172.17.0.2-1733268364664): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 232 (pool-33-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@8e02c40 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 236 (java.util.concurrent.ThreadPoolExecutor$Worker@4d6f6e47[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 237 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 24 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 239 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 240 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 241 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:54632): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 238 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 71 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 242 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 348 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 243 (SyncThread:0): State: WAITING Blocked count: 10 Waited count: 388 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@231963f2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 244 (ProcessThread(sid:0 cport:54632):): State: WAITING Blocked count: 1 Waited count: 480 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@57a67d3c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 245 (RequestThrottler): State: WAITING Blocked count: 1 Waited count: 517 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5eadd9a4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 246 (NIOWorkerThread-1): State: WAITING Blocked count: 4 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 
(weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@7f56ded0 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 487 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 29 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (Time-limited test-SendThread(127.0.0.1:54632)): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 261 (Time-limited test-EventThread): State: WAITING Blocked count: 19 Waited count: 58 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2bad99a9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 262 (NIOWorkerThread-2): State: WAITING Blocked count: 6 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-3): State: WAITING Blocked count: 2 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-4): State: WAITING Blocked count: 4 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (zk-event-processor-pool-0): State: WAITING Blocked count: 25 Waited count: 73 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7d0a6473 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-5): State: WAITING Blocked count: 4 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-6): State: WAITING Blocked count: 6 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-7): State: WAITING Blocked count: 4 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-8): State: WAITING Blocked count: 4 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-9): State: WAITING Blocked count: 10 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-10): State: WAITING Blocked count: 5 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-11): State: WAITING Blocked count: 5 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-12): State: WAITING Blocked count: 2 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-13): State: WAITING Blocked count: 2 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-14): State: WAITING Blocked count: 4 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-15): State: WAITING Blocked count: 3 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (NIOWorkerThread-16): State: WAITING Blocked count: 5 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17382936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43753): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@3463955c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43753): State: WAITING Blocked count: 41 Waited count: 294 Waiting on java.util.concurrent.Semaphore$NonfairSync@47f13990 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43753): State: WAITING Blocked count: 204 Waited count: 731 Waiting on java.util.concurrent.Semaphore$NonfairSync@568016 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43753): State: WAITING Blocked count: 59 Waited count: 9685 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e514593 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43753): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@630ec70 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43753): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@630ec70 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=43753): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@48c3689f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=43753): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@7ca117a1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 287 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=43753): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@58f8a87b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 288 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=43753): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@592a8c78 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 291 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@a7e9052 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 292 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 312 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 332 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 73 Waited count: 6 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 289 (M:0;ed8014902436:43753): State: TIMED_WAITING Blocked count: 12 Waited count: 3914 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.shutdown(AbstractFSWAL.java:1195) app//org.apache.hadoop.hbase.wal.AbstractFSWALProvider.shutdown0(AbstractFSWALProvider.java:162) app//org.apache.hadoop.hbase.wal.AbstractWALProvider$$Lambda$1445/0x00007f8045236c10.run(Unknown Source) app//org.apache.hadoop.hbase.wal.AbstractWALProvider.cleanup(AbstractWALProvider.java:287) app//org.apache.hadoop.hbase.wal.AbstractWALProvider.shutdown(AbstractWALProvider.java:299) app//org.apache.hadoop.hbase.wal.WALFactory.shutdown(WALFactory.java:341) app//org.apache.hadoop.hbase.master.region.MasterRegion.shutdownWAL(MasterRegion.java:140) app//org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:206) app//org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1819) app//org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:631) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 353 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 70 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 355 (master/ed8014902436:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 357 (master/ed8014902436:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 359 (org.apache.hadoop.hdfs.PeerCache@5c1ef90c): State: TIMED_WAITING Blocked count: 0 Waited count: 231 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 377 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 6892 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 394 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 122 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 395 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 127 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 408 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 169 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@10f6beb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 419 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 69 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 407 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 68901 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) 
app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 429 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 22 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 430 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 11 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 454 (RegionServerTracker-0): State: WAITING Blocked count: 7 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3b5d2b28 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 478 (regionserver/ed8014902436:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 29 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@779c2e64 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 476 (regionserver/ed8014902436:0.procedureResultReporter): State: WAITING Blocked count: 17 Waited count: 31 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@523a2d3d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 477 (regionserver/ed8014902436:0.procedureResultReporter): State: WAITING Blocked count: 17 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@758e89d4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 523 (region-location-0): State: WAITING Blocked count: 8 Waited count: 13 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2758e826 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 406 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 68742 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 535 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 539 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 546 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 583 (region-location-1): State: WAITING Blocked count: 7 Waited count: 12 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2758e826 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 584 (region-location-2): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2758e826 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 985 (MutableQuantiles-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1029
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1046 (RPCClient-NioEventLoopGroup-6-4):
  State: RUNNABLE
  Blocked count: 5
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1078 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1087 (zk-permission-watcher-pool-0):
  State: WAITING
  Blocked count: 74
  Waited count: 120
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@33b072d8
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1091 (RPCClient-NioEventLoopGroup-6-5):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1092 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1237 (RPCClient-NioEventLoopGroup-6-6):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1238 (RPCClient-NioEventLoopGroup-6-7):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1239 (RPCClient-NioEventLoopGroup-6-8):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1240 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1292 (RPCClient-NioEventLoopGroup-6-9):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1293 (RPCClient-NioEventLoopGroup-6-10):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1294 (RPCClient-NioEventLoopGroup-6-11):
  State: RUNNABLE
  Blocked count: 1
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1296 (RPCClient-NioEventLoopGroup-6-12):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1297 (RPCClient-NioEventLoopGroup-6-13):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1649 (Container metrics unregistration):
  State: WAITING
  Blocked count: 11
  Waited count: 70
  Waiting on java.util.TaskQueue@53c9b550
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.Object.wait(Object.java:338)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 2009 (region-location-3):
  State: WAITING
  Blocked count: 2
  Waited count: 6
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2758e826
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 2010 (region-location-4):
  State: WAITING
  Blocked count: 2
  Waited count: 6
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2758e826
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 2136 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 2144 (RPCClient-NioEventLoopGroup-6-14):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 5702 (ForkJoinPool.commonPool-worker-5):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 139
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623)
    java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165)
Thread 11458 (AsyncFSWAL-1-hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/MasterData-prefix:ed8014902436,43753,1733268369485):
  State: WAITING
  Blocked count: 0
  Waited count: 2
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6a3cd6ab
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 11463 (process reaper):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 6
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 11468 (Timer for 'JobHistoryServer' metrics system):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 9
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 11471 (WAL-Shutdown-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 2
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.awaitTermination(ThreadPoolExecutor.java:1464)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doShutdown(AbstractFSWAL.java:2117)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$2.call(AbstractFSWAL.java:1179)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$2.call(AbstractFSWAL.java:1174)
    java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 11472 (Close-WAL-Writer-0):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:166)
    app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1431/0x00007f804522d8b0.run(Unknown Source)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
2024-12-03T23:37:45,266 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=1 on file=hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/MasterData/WALs/ed8014902436,43753,1733268369485/ed8014902436%2C43753%2C1733268369485.1733268371251 after 4001ms
java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-03T23:37:46,260 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.async.wait.on.shutdown.seconds"
2024-12-03T23:37:46,261 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-12-03T23:37:46,261 INFO [M:0;ed8014902436:43753 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-12-03T23:37:46,262 INFO [M:0;ed8014902436:43753 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43753
2024-12-03T23:37:46,262 INFO [M:0;ed8014902436:43753 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-03T23:37:46,268 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37331/user/jenkins/test-data/5984d460-dd4b-ee74-bf8b-e1d84c964e3b/MasterData/WALs/ed8014902436,43753,1733268369485/ed8014902436%2C43753%2C1733268369485.1733268371251
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 12 more
2024-12-03T23:37:46,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-03T23:37:46,387 INFO [M:0;ed8014902436:43753 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-03T23:37:46,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43753-0x1019ecdff490000, quorum=127.0.0.1:54632, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-03T23:37:46,426 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@41fe4483{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-03T23:37:46,426 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6d761894{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-03T23:37:46,426 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-03T23:37:46,427 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5fa1fc7f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-03T23:37:46,427 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@49217e61{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/hadoop.log.dir/,STOPPED}
2024-12-03T23:37:46,429 WARN [BP-1874256318-172.17.0.2-1733268364664 heartbeating to localhost/127.0.0.1:37331 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-03T23:37:46,429 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
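(Aside on the wal.AbstractFSWAL(2118) ERROR above: the message itself points at "hbase.wal.async.wait.on.shutdown.seconds" when the async WAL writer needs longer than the default 5 seconds to close. A minimal sketch of bumping that setting programmatically is shown below; the class name WalShutdownWaitExample and the 30-second value are illustrative only and do not come from this log.)

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalShutdownWaitExample {
  public static void main(String[] args) {
    // Start from the standard HBase configuration (hbase-default.xml / hbase-site.xml on the classpath).
    Configuration conf = HBaseConfiguration.create();
    // Allow the async WAL writer more time to close on shutdown; 30 seconds is an arbitrary example value.
    conf.setInt("hbase.wal.async.wait.on.shutdown.seconds", 30);
    System.out.println("wait on shutdown = " + conf.get("hbase.wal.async.wait.on.shutdown.seconds"));
  }
}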
2024-12-03T23:37:46,429 WARN [BP-1874256318-172.17.0.2-1733268364664 heartbeating to localhost/127.0.0.1:37331 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1874256318-172.17.0.2-1733268364664 (Datanode Uuid 279ee435-d4d4-4708-b3bc-a2799a7600c7) service to localhost/127.0.0.1:37331
2024-12-03T23:37:46,429 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-03T23:37:46,430 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data5/current/BP-1874256318-172.17.0.2-1733268364664 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-03T23:37:46,431 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data6/current/BP-1874256318-172.17.0.2-1733268364664 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-03T23:37:46,431 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-03T23:37:46,443 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3f639787{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-03T23:37:46,443 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@439d9c33{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-03T23:37:46,443 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-03T23:37:46,443 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3fcad17b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-03T23:37:46,444 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@8c924b4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/hadoop.log.dir/,STOPPED}
2024-12-03T23:37:46,446 WARN [BP-1874256318-172.17.0.2-1733268364664 heartbeating to localhost/127.0.0.1:37331 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-03T23:37:46,446 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-03T23:37:46,446 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-03T23:37:46,446 WARN [BP-1874256318-172.17.0.2-1733268364664 heartbeating to localhost/127.0.0.1:37331 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1874256318-172.17.0.2-1733268364664 (Datanode Uuid e208b783-b4f1-487a-9462-89cb6391e1cd) service to localhost/127.0.0.1:37331
2024-12-03T23:37:46,447 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data3/current/BP-1874256318-172.17.0.2-1733268364664 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-03T23:37:46,447 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data4/current/BP-1874256318-172.17.0.2-1733268364664 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-03T23:37:46,447 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-03T23:37:46,450 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@b343855{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-03T23:37:46,450 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@33a58c38{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-03T23:37:46,450 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-03T23:37:46,451 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@66b0eb98{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-03T23:37:46,451 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6b6c5d7e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/hadoop.log.dir/,STOPPED}
2024-12-03T23:37:46,452 WARN [BP-1874256318-172.17.0.2-1733268364664 heartbeating to localhost/127.0.0.1:37331 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-03T23:37:46,452 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-03T23:37:46,452 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-03T23:37:46,452 WARN [BP-1874256318-172.17.0.2-1733268364664 heartbeating to localhost/127.0.0.1:37331 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1874256318-172.17.0.2-1733268364664 (Datanode Uuid ae93b843-ed0e-4a0a-9acd-7597bf4b3d80) service to localhost/127.0.0.1:37331
2024-12-03T23:37:46,453 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data1/current/BP-1874256318-172.17.0.2-1733268364664 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-03T23:37:46,453 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/cluster_92a58d70-cd1b-261e-386d-6c4662e19adb/data/data2/current/BP-1874256318-172.17.0.2-1733268364664 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-03T23:37:46,453 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-03T23:37:46,459 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@150ffd7b{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-03T23:37:46,460 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@322d032a{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-03T23:37:46,460 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-03T23:37:46,460 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@15a5d53b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-03T23:37:46,460 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@72770802{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/0362ca44-8737-06ef-8689-ff1154e01458/hadoop.log.dir/,STOPPED}
2024-12-03T23:37:46,480 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-12-03T23:37:46,655 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down